author	David Howells <dhowells@redhat.com>	2006-12-05 12:01:28 -0500
committer	David Howells <dhowells@warthog.cambridge.redhat.com>	2006-12-05 12:01:28 -0500
commit	9db73724453a9350e1c22dbe732d427e2939a5c9
tree	15e3ead6413ae97398a54292acc199bee0864d42 /drivers
parent	4c1ac1b49122b805adfa4efc620592f68dccf5db
parent	e62438630ca37539c8cc1553710bbfaa3cf960a7
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:

	drivers/ata/libata-scsi.c
	include/linux/libata.h

Further merge of Linus's head and compilation fixups.

Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/dock.c | 1
-rw-r--r--  drivers/ata/Kconfig | 35
-rw-r--r--  drivers/ata/Makefile | 4
-rw-r--r--  drivers/ata/ahci.c | 229
-rw-r--r--  drivers/ata/ata_generic.c | 11
-rw-r--r--  drivers/ata/ata_piix.c | 194
-rw-r--r--  drivers/ata/libata-core.c | 536
-rw-r--r--  drivers/ata/libata-eh.c | 108
-rw-r--r--  drivers/ata/libata-scsi.c | 328
-rw-r--r--  drivers/ata/libata-sff.c | 43
-rw-r--r--  drivers/ata/libata.h | 24
-rw-r--r--  drivers/ata/pata_ali.c | 140
-rw-r--r--  drivers/ata/pata_amd.c | 26
-rw-r--r--  drivers/ata/pata_artop.c | 1
-rw-r--r--  drivers/ata/pata_atiixp.c | 9
-rw-r--r--  drivers/ata/pata_cmd64x.c | 23
-rw-r--r--  drivers/ata/pata_cs5520.c | 25
-rw-r--r--  drivers/ata/pata_cs5530.c | 100
-rw-r--r--  drivers/ata/pata_cs5535.c | 9
-rw-r--r--  drivers/ata/pata_cypress.c | 9
-rw-r--r--  drivers/ata/pata_efar.c | 7
-rw-r--r--  drivers/ata/pata_hpt366.c | 58
-rw-r--r--  drivers/ata/pata_hpt37x.c | 1
-rw-r--r--  drivers/ata/pata_hpt3x2n.c | 1
-rw-r--r--  drivers/ata/pata_hpt3x3.c | 47
-rw-r--r--  drivers/ata/pata_isapnp.c | 1
-rw-r--r--  drivers/ata/pata_it821x.c | 19
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c | 271
-rw-r--r--  drivers/ata/pata_jmicron.c | 36
-rw-r--r--  drivers/ata/pata_legacy.c | 1
-rw-r--r--  drivers/ata/pata_marvell.c | 224
-rw-r--r--  drivers/ata/pata_mpiix.c | 9
-rw-r--r--  drivers/ata/pata_netcell.c | 8
-rw-r--r--  drivers/ata/pata_ns87410.c | 9
-rw-r--r--  drivers/ata/pata_oldpiix.c | 5
-rw-r--r--  drivers/ata/pata_opti.c | 34
-rw-r--r--  drivers/ata/pata_optidma.c | 9
-rw-r--r--  drivers/ata/pata_pcmcia.c | 1
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 3
-rw-r--r--  drivers/ata/pata_pdc202xx_old.c | 65
-rw-r--r--  drivers/ata/pata_platform.c | 295
-rw-r--r--  drivers/ata/pata_qdi.c | 1
-rw-r--r--  drivers/ata/pata_radisys.c | 5
-rw-r--r--  drivers/ata/pata_rz1000.c | 49
-rw-r--r--  drivers/ata/pata_sc1200.c | 9
-rw-r--r--  drivers/ata/pata_serverworks.c | 33
-rw-r--r--  drivers/ata/pata_sil680.c | 83
-rw-r--r--  drivers/ata/pata_sis.c | 7
-rw-r--r--  drivers/ata/pata_sl82c105.c | 1
-rw-r--r--  drivers/ata/pata_triflex.c | 9
-rw-r--r--  drivers/ata/pata_via.c | 109
-rw-r--r--  drivers/ata/pata_winbond.c | 306
-rw-r--r--  drivers/ata/sata_nv.c | 1044
-rw-r--r--  drivers/ata/sata_promise.c | 46
-rw-r--r--  drivers/ata/sata_sil.c | 16
-rw-r--r--  drivers/ata/sata_sil24.c | 18
-rw-r--r--  drivers/ata/sata_sis.c | 17
-rw-r--r--  drivers/base/cpu.c | 1
-rw-r--r--  drivers/block/viodasd.c | 49
-rw-r--r--  drivers/char/hpet.c | 1
-rw-r--r--  drivers/char/hw_random/core.c | 1
-rw-r--r--  drivers/char/tpm/tpm.h | 1
-rw-r--r--  drivers/hwmon/abituguru.c | 1
-rw-r--r--  drivers/hwmon/hdaps.c | 1
-rw-r--r--  drivers/ide/pci/via82cxxx.c | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 1
-rw-r--r--  drivers/isdn/divert/isdn_divert.c | 2
-rw-r--r--  drivers/leds/ledtrig-ide-disk.c | 1
-rw-r--r--  drivers/leds/ledtrig-timer.c | 1
-rw-r--r--  drivers/macintosh/Kconfig | 7
-rw-r--r--  drivers/macintosh/Makefile | 1
-rw-r--r--  drivers/macintosh/rack-meter.c | 612
-rw-r--r--  drivers/macintosh/smu.c | 3
-rw-r--r--  drivers/macintosh/therm_adt746x.c | 2
-rw-r--r--  drivers/macintosh/therm_pm72.c | 5
-rw-r--r--  drivers/macintosh/therm_windtunnel.c | 7
-rw-r--r--  drivers/media/dvb/cinergyT2/cinergyT2.c | 1
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 1
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_mal.h | 6
-rw-r--r--  drivers/net/ibmveth.c | 4
-rw-r--r--  drivers/net/ibmveth.h | 1
-rw-r--r--  drivers/net/lance.c | 1
-rw-r--r--  drivers/net/ne3210.c | 1
-rw-r--r--  drivers/net/phy/fixed.c | 2
-rw-r--r--  drivers/net/sk98lin/skge.c | 1
-rw-r--r--  drivers/net/spider_net.c | 18
-rw-r--r--  drivers/net/starfire.c | 1
-rw-r--r--  drivers/net/sun3lance.c | 1
-rw-r--r--  drivers/net/sungem.c | 1
-rw-r--r--  drivers/net/sunhme.c | 6
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 2
-rw-r--r--  drivers/net/tulip/de4x5.c | 8
-rw-r--r--  drivers/net/typhoon.c | 1
-rw-r--r--  drivers/pci/access.c | 1
-rw-r--r--  drivers/ps3/Makefile | 1
-rw-r--r--  drivers/ps3/system-bus.c | 362
-rw-r--r--  drivers/s390/block/dasd.c | 16
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 36
-rw-r--r--  drivers/s390/char/con3215.c | 50
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 37
-rw-r--r--  drivers/s390/cio/chsc.c | 82
-rw-r--r--  drivers/s390/cio/cio.c | 128
-rw-r--r--  drivers/s390/cio/css.h | 3
-rw-r--r--  drivers/s390/cio/device.c | 17
-rw-r--r--  drivers/s390/cio/device_fsm.c | 27
-rw-r--r--  drivers/s390/cio/device_id.c | 10
-rw-r--r--  drivers/s390/cio/device_pgid.c | 30
-rw-r--r--  drivers/s390/cio/device_status.c | 3
-rw-r--r--  drivers/s390/cio/qdio.c | 8
-rw-r--r--  drivers/s390/cio/qdio.h | 4
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 10
-rw-r--r--  drivers/s390/net/lcs.c | 78
-rw-r--r--  drivers/s390/net/lcs.h | 25
-rw-r--r--  drivers/s390/net/qeth.h | 2
-rw-r--r--  drivers/s390/net/qeth_main.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 1
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_cpm2.c | 2
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 469
-rw-r--r--  drivers/spi/spi_butterfly.c | 1
-rw-r--r--  drivers/video/platinumfb.c | 5
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 1
122 files changed, 5600 insertions, 1285 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 4ac14dab3079..67711770b1d9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -77,3 +77,4 @@ obj-$(CONFIG_CRYPTO) += crypto/
 obj-$(CONFIG_SUPERH) += sh/
 obj-$(CONFIG_GENERIC_TIME) += clocksource/
 obj-$(CONFIG_DMA_ENGINE) += dma/
+obj-$(CONFIG_PPC_PS3) += ps3/
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 578b99b71d9c..bf5b79ed3613 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/notifier.h>
+#include <linux/jiffies.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 03f6338acc8f..984ab284382a 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -328,6 +328,15 @@ config PATA_TRIFLEX
 
	  If unsure, say N.
 
+config PATA_MARVELL
+	tristate "Marvell PATA support via legacy mode"
+	depends on PCI
+	help
+	  This option enables limited support for the Marvell 88SE6145 ATA
+	  controller.
+
+	  If unsure, say N.
+
 config PATA_MPIIX
 	tristate "Intel PATA MPIIX support"
 	depends on PCI
@@ -483,6 +492,32 @@ config PATA_WINBOND
 
	  If unsure, say N.
 
+config PATA_WINBOND_VLB
+	tristate "Winbond W83759A VLB PATA support (Experimental)"
+	depends on ISA && EXPERIMENTAL
+	help
+	  Support for the Winbond W83759A controller on Vesa Local Bus
+	  systems.
+
+config PATA_PLATFORM
+	tristate "Generic platform device PATA support"
+	depends on EMBEDDED
+	help
+	  This option enables support for generic directly connected ATA
+	  devices commonly found on embedded systems.
+
+	  If unsure, say N.
+
+config PATA_IXP4XX_CF
+	tristate "IXP4XX Compact Flash support"
+	depends on ARCH_IXP4XX
+	help
+	  This option enables support for a Compact Flash connected on
+	  the ixp4xx expansion bus. This driver had been written for
+	  Loft/Avila boards in mind but can work with others.
+
+	  If unsure, say N.
+
 endif
 endmenu
 
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 72243a677f9b..bc3d81ae757e 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
 obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
 obj-$(CONFIG_PATA_OPTI) += pata_opti.o
 obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
+obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
 obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
 obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
 obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
@@ -51,8 +52,11 @@ obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
 obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
 obj-$(CONFIG_PATA_VIA) += pata_via.o
 obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
+obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
 obj-$(CONFIG_PATA_SIS) += pata_sis.o
 obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
+obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
+obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
 # Should be last but one libata driver
 obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
 # Should be last libata driver
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index bddb14e91d3c..f36da488a2c1 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -53,6 +53,7 @@
 
 enum {
 	AHCI_PCI_BAR = 5,
+	AHCI_MAX_PORTS = 32,
 	AHCI_MAX_SG = 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY = 0xffffffff,
 	AHCI_USE_CLUSTERING = 0,
@@ -77,8 +78,9 @@ enum {
 	RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
 
 	board_ahci = 0,
-	board_ahci_vt8251 = 1,
-	board_ahci_ign_iferr = 2,
+	board_ahci_pi = 1,
+	board_ahci_vt8251 = 2,
+	board_ahci_ign_iferr = 3,
 
 	/* global controller registers */
 	HOST_CAP = 0x00, /* host capabilities */
@@ -167,9 +169,9 @@ enum {
 	AHCI_FLAG_MSI = (1 << 0),
 
 	/* ap->flags bits */
-	AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
-	AHCI_FLAG_NO_NCQ = (1 << 25),
-	AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 26), /* ignore IRQ_IF_ERR */
+	AHCI_FLAG_NO_NCQ = (1 << 24),
+	AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */
+	AHCI_FLAG_HONOR_PI = (1 << 26), /* honor PORTS_IMPL */
 };
 
 struct ahci_cmd_hdr {
@@ -216,6 +218,7 @@ static u8 ahci_check_status(struct ata_port *ap);
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
 static void ahci_error_handler(struct ata_port *ap);
+static void ahci_vt8251_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
 static int ahci_port_resume(struct ata_port *ap);
@@ -275,6 +278,37 @@ static const struct ata_port_operations ahci_ops = {
 	.port_stop = ahci_port_stop,
 };
 
+static const struct ata_port_operations ahci_vt8251_ops = {
+	.port_disable = ata_port_disable,
+
+	.check_status = ahci_check_status,
+	.check_altstatus = ahci_check_status,
+	.dev_select = ata_noop_dev_select,
+
+	.tf_read = ahci_tf_read,
+
+	.qc_prep = ahci_qc_prep,
+	.qc_issue = ahci_qc_issue,
+
+	.irq_handler = ahci_interrupt,
+	.irq_clear = ahci_irq_clear,
+
+	.scr_read = ahci_scr_read,
+	.scr_write = ahci_scr_write,
+
+	.freeze = ahci_freeze,
+	.thaw = ahci_thaw,
+
+	.error_handler = ahci_vt8251_error_handler,
+	.post_internal_cmd = ahci_post_internal_cmd,
+
+	.port_suspend = ahci_port_suspend,
+	.port_resume = ahci_port_resume,
+
+	.port_start = ahci_port_start,
+	.port_stop = ahci_port_stop,
+};
+
 static const struct ata_port_info ahci_port_info[] = {
 	/* board_ahci */
 	{
@@ -286,16 +320,26 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
 		.port_ops = &ahci_ops,
 	},
+	/* board_ahci_pi */
+	{
+		.sht = &ahci_sht,
+		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+			 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+			 ATA_FLAG_SKIP_D2H_BSY | AHCI_FLAG_HONOR_PI,
+		.pio_mask = 0x1f, /* pio0-4 */
+		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
+		.port_ops = &ahci_ops,
+	},
 	/* board_ahci_vt8251 */
 	{
 		.sht = &ahci_sht,
 		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 			 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
 			 ATA_FLAG_SKIP_D2H_BSY |
-			 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
+			 ATA_FLAG_HRST_TO_RESUME | AHCI_FLAG_NO_NCQ,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 ; FIXME */
-		.port_ops = &ahci_ops,
+		.port_ops = &ahci_vt8251_ops,
 	},
 	/* board_ahci_ign_iferr */
 	{
@@ -322,22 +366,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
 	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
 	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
-	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
-	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
-	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
 
 	/* JMicron */
 	{ PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */
@@ -372,6 +416,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
 	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
 
+	/* Generic, PCI class code for AHCI */
+	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  0x010601, 0xffffff, board_ahci },
+
 	{ } /* terminate list */
 };
 
@@ -386,6 +434,11 @@ static struct pci_driver ahci_pci_driver = {
 };
 
 
+static inline int ahci_nr_ports(u32 cap)
+{
+	return (cap & 0x1f) + 1;
+}
+
 static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
 {
 	return base + 0x100 + (port * 0x80);
@@ -559,9 +612,6 @@ static void ahci_power_down(void __iomem *port_mmio, u32 cap)
 static void ahci_init_port(void __iomem *port_mmio, u32 cap,
 			   dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
 {
-	/* power up */
-	ahci_power_up(port_mmio, cap);
-
 	/* enable FIS reception */
 	ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
 
@@ -587,19 +637,17 @@ static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
 		return rc;
 	}
 
-	/* put device into slumber mode */
-	ahci_power_down(port_mmio, cap);
-
 	return 0;
 }
 
 static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
 {
-	u32 cap_save, tmp;
+	u32 cap_save, impl_save, tmp;
 
 	cap_save = readl(mmio + HOST_CAP);
 	cap_save &= ( (1<<28) | (1<<17) );
 	cap_save |= (1 << 27);
+	impl_save = readl(mmio + HOST_PORTS_IMPL);
 
 	/* global controller reset */
 	tmp = readl(mmio + HOST_CTL);
@@ -620,10 +668,21 @@ static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
 		return -EIO;
 	}
 
+	/* turn on AHCI mode */
 	writel(HOST_AHCI_EN, mmio + HOST_CTL);
 	(void) readl(mmio + HOST_CTL); /* flush */
+
+	/* These write-once registers are normally cleared on reset.
+	 * Restore BIOS values... which we HOPE were present before
+	 * reset.
+	 */
+	if (!impl_save) {
+		impl_save = (1 << ahci_nr_ports(cap_save)) - 1;
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "PORTS_IMPL is zero, forcing 0x%x\n", impl_save);
+	}
 	writel(cap_save, mmio + HOST_CAP);
-	writel(0xf, mmio + HOST_PORTS_IMPL);
+	writel(impl_save, mmio + HOST_PORTS_IMPL);
 	(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
 
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
@@ -639,7 +698,8 @@ static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
 }
 
 static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
-				 int n_ports, u32 cap)
+				 int n_ports, unsigned int port_flags,
+				 struct ahci_host_priv *hpriv)
 {
 	int i, rc;
 	u32 tmp;
@@ -648,13 +708,12 @@ static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
 		void __iomem *port_mmio = ahci_port_base(mmio, i);
 		const char *emsg = NULL;
 
-#if 0 /* BIOSen initialize this incorrectly */
-		if (!(hpriv->port_map & (1 << i)))
+		if ((port_flags & AHCI_FLAG_HONOR_PI) &&
+		    !(hpriv->port_map & (1 << i)))
 			continue;
-#endif
 
 		/* make sure port is not active */
-		rc = ahci_deinit_port(port_mmio, cap, &emsg);
+		rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
 		if (rc)
 			dev_printk(KERN_WARNING, &pdev->dev,
 				   "%s (%d)\n", emsg, rc);
@@ -729,17 +788,6 @@ static int ahci_clo(struct ata_port *ap)
 	return 0;
 }
 
-static int ahci_prereset(struct ata_port *ap)
-{
-	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
-	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
-		/* ATA_BUSY hasn't cleared, so send a CLO */
-		ahci_clo(ap);
-	}
-
-	return ata_std_prereset(ap);
-}
-
 static int ahci_softreset(struct ata_port *ap, unsigned int *class)
 {
 	struct ahci_port_priv *pp = ap->private_data;
@@ -877,6 +925,31 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
 	return rc;
 }
 
+static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
+{
+	void __iomem *mmio = ap->host->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	ahci_stop_engine(port_mmio);
+
+	rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context));
+
+	/* vt8251 needs SError cleared for the port to operate */
+	ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
+
+	ahci_start_engine(port_mmio);
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+
+	/* vt8251 doesn't clear BSY on signature FIS reception,
+	 * request follow-up softreset.
+	 */
+	return rc ?: -EAGAIN;
+}
+
 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 {
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -1196,7 +1269,23 @@ static void ahci_error_handler(struct ata_port *ap)
 	}
 
 	/* perform recovery */
-	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
+	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset,
+		  ahci_postreset);
+}
+
+static void ahci_vt8251_error_handler(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(port_mmio);
+		ahci_start_engine(port_mmio);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
 		  ahci_postreset);
 }
 
@@ -1226,7 +1315,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 	int rc;
 
 	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
-	if (rc) {
+	if (rc == 0)
+		ahci_power_down(port_mmio, hpriv->cap);
+	else {
 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
 		ahci_init_port(port_mmio, hpriv->cap,
 			       pp->cmd_slot_dma, pp->rx_fis_dma);
@@ -1242,6 +1333,7 @@ static int ahci_port_resume(struct ata_port *ap)
 	void __iomem *mmio = ap->host->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 
+	ahci_power_up(port_mmio, hpriv->cap);
 	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
 
 	return 0;
@@ -1281,7 +1373,8 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
 		if (rc)
 			return rc;
 
-		ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
+		ahci_init_controller(mmio, pdev, host->n_ports,
+				     host->ports[0]->flags, hpriv);
 	}
 
 	ata_host_resume(host);
@@ -1347,6 +1440,9 @@ static int ahci_port_start(struct ata_port *ap)
 
 	ap->private_data = pp;
 
+	/* power up port */
+	ahci_power_up(port_mmio, hpriv->cap);
+
 	/* initialize port */
 	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
 
@@ -1393,7 +1489,7 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
 	struct ahci_host_priv *hpriv = probe_ent->private_data;
 	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
 	void __iomem *mmio = probe_ent->mmio_base;
-	unsigned int i, using_dac;
+	unsigned int i, cap_n_ports, using_dac;
 	int rc;
 
 	rc = ahci_reset_controller(mmio, pdev);
@@ -1402,10 +1498,34 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
 
 	hpriv->cap = readl(mmio + HOST_CAP);
 	hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
-	probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
+	cap_n_ports = ahci_nr_ports(hpriv->cap);
 
 	VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
-		hpriv->cap, hpriv->port_map, probe_ent->n_ports);
+		hpriv->cap, hpriv->port_map, cap_n_ports);
+
+	if (probe_ent->port_flags & AHCI_FLAG_HONOR_PI) {
+		unsigned int n_ports = cap_n_ports;
+		u32 port_map = hpriv->port_map;
+		int max_port = 0;
+
+		for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
+			if (port_map & (1 << i)) {
+				n_ports--;
+				port_map &= ~(1 << i);
+				max_port = i;
+			} else
+				probe_ent->dummy_port_mask |= 1 << i;
+		}
+
+		if (n_ports || port_map)
+			dev_printk(KERN_WARNING, &pdev->dev,
+				   "nr_ports (%u) and implemented port map "
+				   "(0x%x) don't match\n",
+				   cap_n_ports, hpriv->port_map);
+
+		probe_ent->n_ports = max_port + 1;
+	} else
+		probe_ent->n_ports = cap_n_ports;
 
 	using_dac = hpriv->cap & HOST_CAP_64;
 	if (using_dac &&
@@ -1437,7 +1557,8 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
 	for (i = 0; i < probe_ent->n_ports; i++)
 		ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
 
-	ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
+	ahci_init_controller(mmio, pdev, probe_ent->n_ports,
+			     probe_ent->port_flags, hpriv);
 
 	pci_set_master(pdev);
 
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 4a80ff9312b8..908751d27e76 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "ata_generic"
-#define DRV_VERSION "0.2.6"
+#define DRV_VERSION "0.2.10"
 
 /*
  * A generic parallel ATA driver using libata
@@ -109,7 +109,6 @@ static struct scsi_host_template generic_sht = {
 	.can_queue = ATA_DEF_QUEUE,
 	.this_id = ATA_SHT_THIS_ID,
 	.sg_tablesize = LIBATA_MAX_PRD,
-	.max_sectors = ATA_MAX_SECTORS,
 	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
 	.emulated = ATA_SHT_EMULATED,
 	.use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -118,6 +117,8 @@ static struct scsi_host_template generic_sht = {
 	.slave_configure = ata_scsi_slave_config,
 	.slave_destroy = ata_scsi_slave_destroy,
 	.bios_param = ata_std_bios_param,
+	.resume = ata_scsi_device_resume,
+	.suspend = ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations generic_port_ops = {
@@ -226,12 +227,14 @@ static struct pci_driver ata_generic_pci_driver = {
 	.name = DRV_NAME,
 	.id_table = ata_generic,
 	.probe = ata_generic_init_one,
-	.remove = ata_pci_remove_one
+	.remove = ata_pci_remove_one,
+	.suspend = ata_pci_device_suspend,
+	.resume = ata_pci_device_resume,
 };
 
 static int __init ata_generic_init(void)
 {
-	return pci_module_init(&ata_generic_pci_driver);
+	return pci_register_driver(&ata_generic_pci_driver);
 }
 
 
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 720174d628fa..c7de0bb1591f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -40,7 +40,7 @@
  * Documentation
  * Publically available from Intel web site. Errata documentation
  * is also publically available. As an aide to anyone hacking on this
- * driver the list of errata that are relevant is below.going back to
+ * driver the list of errata that are relevant is below, going back to
  * PIIX4. Older device documentation is now a bit tricky to find.
  *
  * The chipsets all follow very much the same design. The orginal Triton
@@ -93,7 +93,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "ata_piix"
-#define DRV_VERSION "2.00ac6"
+#define DRV_VERSION "2.00ac7"
 
 enum {
 	PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
@@ -101,11 +101,13 @@ enum {
 	ICH5_PCS = 0x92, /* port control and status */
 	PIIX_SCC = 0x0A, /* sub-class code register */
 
-	PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */
 	PIIX_FLAG_SCR = (1 << 26), /* SCR available */
 	PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
 	PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
 
+	PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS,
+	PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
+
 	/* combined mode. if set, PATA is channel 0.
 	 * if clear, PATA is channel 1.
 	 */
@@ -122,11 +124,10 @@ enum {
 	ich_pata_100 = 3, /* ICH up to UDMA 100 */
 	ich_pata_133 = 4, /* ICH up to UDMA 133 */
 	ich5_sata = 5,
-	esb_sata = 6,
-	ich6_sata = 7,
-	ich6_sata_ahci = 8,
-	ich6m_sata_ahci = 9,
-	ich8_sata_ahci = 10,
+	ich6_sata = 6,
+	ich6_sata_ahci = 7,
+	ich6m_sata_ahci = 8,
+	ich8_sata_ahci = 9,
 
 	/* constants for mapping table */
 	P0 = 0, /* port 0 */
@@ -143,13 +144,11 @@ enum {
 struct piix_map_db {
 	const u32 mask;
 	const u16 port_enable;
-	const int present_shift;
 	const int map[][4];
 };
 
 struct piix_host_priv {
 	const int *map;
-	const struct piix_map_db *map_db;
 };
 
 static int piix_init_one (struct pci_dev *pdev,
@@ -214,9 +213,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* 82801EB (ICH5) */
 	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 6300ESB (ICH5 variant with broken PCS present bits) */
-	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 6300ESB pretending RAID */
-	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 82801FB/FW (ICH6/ICH6W) */
 	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* 82801FR/FRW (ICH6R/ICH6RW) */
@@ -367,7 +366,6 @@ static const struct ata_port_operations piix_sata_ops = {
 static const struct piix_map_db ich5_map_db = {
 	.mask = 0x7,
 	.port_enable = 0x3,
-	.present_shift = 4,
 	.map = {
 		/* PM PS SM SS MAP */
 		{ P0, NA, P1, NA }, /* 000b */
@@ -384,7 +382,6 @@ static const struct piix_map_db ich5_map_db = {
 static const struct piix_map_db ich6_map_db = {
 	.mask = 0x3,
 	.port_enable = 0xf,
-	.present_shift = 4,
 	.map = {
 		/* PM PS SM SS MAP */
 		{ P0, P2, P1, P3 }, /* 00b */
@@ -397,7 +394,6 @@ static const struct piix_map_db ich6_map_db = {
 static const struct piix_map_db ich6m_map_db = {
 	.mask = 0x3,
 	.port_enable = 0x5,
-	.present_shift = 4,
 
 	/* Map 01b isn't specified in the doc but some notebooks use
 	 * it anyway. MAP 01b have been spotted on both ICH6M and
@@ -415,7 +411,6 @@ static const struct piix_map_db ich6m_map_db = {
 static const struct piix_map_db ich8_map_db = {
 	.mask = 0x3,
 	.port_enable = 0x3,
-	.present_shift = 8,
 	.map = {
 		/* PM PS SM SS MAP */
 		{ P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
@@ -427,7 +422,6 @@ static const struct piix_map_db ich8_map_db = {
 
 static const struct piix_map_db *piix_map_db_table[] = {
 	[ich5_sata] = &ich5_map_db,
-	[esb_sata] = &ich5_map_db,
 	[ich6_sata] = &ich6_map_db,
 	[ich6_sata_ahci] = &ich6_map_db,
 	[ich6m_sata_ahci] = &ich6m_map_db,
@@ -438,7 +432,7 @@ static struct ata_port_info piix_port_info[] = {
 	/* piix_pata_33: 0: PIIX3 or 4 at 33MHz */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = PIIX_PATA_FLAGS,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
 		.udma_mask = ATA_UDMA_MASK_40C,
@@ -448,7 +442,7 @@ static struct ata_port_info piix_port_info[] = {
 	/* ich_pata_33: 1 ICH0 - ICH at 33Mhz*/
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
+		.flags = PIIX_PATA_FLAGS,
 		.pio_mask = 0x1f, /* pio 0-4 */
 		.mwdma_mask = 0x06, /* Check: maybe 0x07 */
 		.udma_mask = ATA_UDMA2, /* UDMA33 */
@@ -457,7 +451,7 @@ static struct ata_port_info piix_port_info[] = {
 	/* ich_pata_66: 2 ICH controllers up to 66MHz */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
+		.flags = PIIX_PATA_FLAGS,
 		.pio_mask = 0x1f, /* pio 0-4 */
 		.mwdma_mask = 0x06, /* MWDMA0 is broken on chip */
 		.udma_mask = ATA_UDMA4,
@@ -467,7 +461,7 @@ static struct ata_port_info piix_port_info[] = {
 	/* ich_pata_100: 3 */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
+		.flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x06, /* mwdma1-2 */
 		.udma_mask = ATA_UDMA5, /* udma0-5 */
@@ -477,7 +471,7 @@ static struct ata_port_info piix_port_info[] = {
 	/* ich_pata_133: 4 ICH with full UDMA6 */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
+		.flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
 		.pio_mask = 0x1f, /* pio 0-4 */
 		.mwdma_mask = 0x06, /* Check: maybe 0x07 */
 		.udma_mask = ATA_UDMA6, /* UDMA133 */
@@ -487,41 +481,27 @@ static struct ata_port_info piix_port_info[] = {
 	/* ich5_sata: 5 */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
-			 PIIX_FLAG_IGNORE_PCS,
-		.pio_mask = 0x1f, /* pio0-4 */
-		.mwdma_mask = 0x07, /* mwdma0-2 */
-		.udma_mask = 0x7f, /* udma0-6 */
-		.port_ops = &piix_sata_ops,
-	},
-
-	/* i6300esb_sata: 6 */
-	{
-		.sht = &piix_sht,
-		.flags = ATA_FLAG_SATA |
-			 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
+		.flags = PIIX_SATA_FLAGS,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x07, /* mwdma0-2 */
 		.udma_mask = 0x7f, /* udma0-6 */
 		.port_ops = &piix_sata_ops,
 	},
 
-	/* ich6_sata: 7 */
+	/* ich6_sata: 6 */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SATA |
-			 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
+		.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x07, /* mwdma0-2 */
 		.udma_mask = 0x7f, /* udma0-6 */
 		.port_ops = &piix_sata_ops,
 	},
 
-	/* ich6_sata_ahci: 8 */
+	/* ich6_sata_ahci: 7 */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SATA |
-			 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 			 PIIX_FLAG_AHCI,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x07, /* mwdma0-2 */
@@ -529,11 +509,10 @@ static struct ata_port_info piix_port_info[] = {
 		.port_ops = &piix_sata_ops,
 	},
 
-	/* ich6m_sata_ahci: 9 */
+	/* ich6m_sata_ahci: 8 */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SATA |
-			 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 			 PIIX_FLAG_AHCI,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x07, /* mwdma0-2 */
@@ -541,11 +520,10 @@ static struct ata_port_info piix_port_info[] = {
 		.port_ops = &piix_sata_ops,
 	},
 
-	/* ich8_sata_ahci: 10 */
+	/* ich8_sata_ahci: 9 */
 	{
 		.sht = &piix_sht,
-		.flags = ATA_FLAG_SATA |
-			 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 			 PIIX_FLAG_AHCI,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x07, /* mwdma0-2 */
@@ -566,10 +544,22 @@ MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
-static int force_pcs = 0;
-module_param(force_pcs, int, 0444);
-MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
-		 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
+struct ich_laptop {
+	u16 device;
+	u16 subvendor;
+	u16 subdevice;
+};
+
+/*
+ *	List of laptops that use short cables rather than 80 wire
+ */
+
+static const struct ich_laptop ich_laptop[] = {
+	/* devid, subvendor, subdev */
+	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
+	/* end marker */
+	{ 0, }
+};
 
 /**
  * piix_pata_cbl_detect - Probe host controller cable detect info
@@ -585,12 +575,24 @@ MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
 static void ich_pata_cbl_detect(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	const struct ich_laptop *lap = &ich_laptop[0];
 	u8 tmp, mask;
 
 	/* no 80c support in host controller? */
 	if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
 		goto cbl40;
 
+	/* Check for specials - Acer Aspire 5602WLMi */
+	while (lap->device) {
+		if (lap->device == pdev->device &&
+		    lap->subvendor == pdev->subsystem_vendor &&
+		    lap->subdevice == pdev->subsystem_device) {
+			ap->cbl = ATA_CBL_PATA40_SHORT;
+			return;
+		}
+		lap++;
+	}
+
 	/* check BIOS cable detect results */
 	mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
 	pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
@@ -659,84 +661,9 @@ static void ich_pata_error_handler(struct ata_port *ap)
 			   ata_std_postreset);
 }
 
-/**
- *	piix_sata_present_mask - determine present mask for SATA host controller
- *	@ap: Target port
- *
- *	Reads SATA PCI device's PCI config register Port Configuration
- *	and Status (PCS) to determine port and device availability.
- *
- *	LOCKING:
- *	None (inherited from caller).
- *
- *	RETURNS:
- *	determined present_mask
- */
-static unsigned int piix_sata_present_mask(struct ata_port *ap)
-{
-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	struct piix_host_priv *hpriv = ap->host->private_data;
-	const unsigned int *map = hpriv->map;
-	int base = 2 * ap->port_no;
-	unsigned int present_mask = 0;
-	int port, i;
-	u16 pcs;
-
-	pci_read_config_word(pdev, ICH5_PCS, &pcs);
-	DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
-
-	for (i = 0; i < 2; i++) {
-		port = map[base + i];
-		if (port < 0)
-			continue;
-		if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
-		    (pcs & 1 << (hpriv->map_db->present_shift + port)))
-			present_mask |= 1 << i;
-	}
-
-	DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
-		ap->id, pcs, present_mask);
-
-	return present_mask;
-}
-
-/**
- *	piix_sata_softreset - reset SATA host port via ATA SRST
- *	@ap: port to reset
- *	@classes: resulting classes of attached devices
- *
- *	Reset SATA host port via ATA SRST. On controllers with
- *	reliable PCS present bits, the bits are used to determine
- *	device presence.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
-{
-	unsigned int present_mask;
-	int i, rc;
-
-	present_mask = piix_sata_present_mask(ap);
-
-	rc = ata_std_softreset(ap, classes);
-	if (rc)
-		return rc;
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		if (!(present_mask & (1 << i)))
-			classes[i] = ATA_DEV_NONE;
-	}
-
-	return 0;
-}
-
 static void piix_sata_error_handler(struct ata_port *ap)
 {
-	ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
 			   ata_std_postreset);
 }
 
@@ -1051,18 +978,6 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
 		pci_write_config_word(pdev, ICH5_PCS, new_pcs);
 		msleep(150);
 	}
-
-	if (force_pcs == 1) {
-		dev_printk(KERN_INFO, &pdev->dev,
-			   "force ignoring PCS (0x%x)\n", new_pcs);
-		pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS;
-		pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS;
-	} else if (force_pcs == 2) {
-		dev_printk(KERN_INFO, &pdev->dev,
-			   "force honoring PCS (0x%x)\n", new_pcs);
-		pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS;
-		pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS;
-	}
 }
 
 static void __devinit piix_init_sata_map(struct pci_dev *pdev,
@@ -1112,7 +1027,6 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
 			   "invalid MAP value %u\n", map_value);
 
 	hpriv->map = map;
-	hpriv->map_db = map_db;
 }
 
 /**
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b5f2da6ac80e..8816e30fb7a4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -199,7 +199,8 @@ static const u8 ata_rw_cmds[] = {
 
 /**
  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
- *	@qc: command to examine and configure
+ *	@tf: command to examine and configure
+ *	@dev: device tf belongs to
  *
  *	Examine the device configuration and tf->flags to calculate
  *	the proper read/write commands and protocol to use.
@@ -207,10 +208,8 @@ static const u8 ata_rw_cmds[] = {
  *	LOCKING:
  *	caller.
  */
-int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
+static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
 {
-	struct ata_taskfile *tf = &qc->tf;
-	struct ata_device *dev = qc->dev;
 	u8 cmd;
 
 	int index, fua, lba48, write;
@@ -222,7 +221,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
 	if (dev->flags & ATA_DFLAG_PIO) {
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
-	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
+	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
 		/* Unable to use DMA due to host limitation */
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
@@ -240,6 +239,174 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
 }
 
+/**
+ *	ata_tf_read_block - Read block address from ATA taskfile
+ *	@tf: ATA taskfile of interest
+ *	@dev: ATA device @tf belongs to
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	Read block address from @tf.  This function can handle all
+ *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
+ *	flags select the address format to use.
+ *
+ *	RETURNS:
+ *	Block address read from @tf.
+ */
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+{
+	u64 block = 0;
+
+	if (tf->flags & ATA_TFLAG_LBA) {
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			block |= (u64)tf->hob_lbah << 40;
+			block |= (u64)tf->hob_lbam << 32;
+			block |= tf->hob_lbal << 24;
+		} else
+			block |= (tf->device & 0xf) << 24;
+
+		block |= tf->lbah << 16;
+		block |= tf->lbam << 8;
+		block |= tf->lbal;
+	} else {
+		u32 cyl, head, sect;
+
+		cyl = tf->lbam | (tf->lbah << 8);
+		head = tf->device & 0xf;
+		sect = tf->lbal;
+
+		block = (cyl * dev->heads + head) * dev->sectors + sect;
+	}
+
+	return block;
+}
+
+/**
+ *	ata_build_rw_tf - Build ATA taskfile for given read/write request
+ *	@tf: Target ATA taskfile
+ *	@dev: ATA device @tf belongs to
+ *	@block: Block address
+ *	@n_block: Number of blocks
+ *	@tf_flags: RW/FUA etc...
+ *	@tag: tag
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	Build ATA taskfile @tf for read/write request described by
+ *	@block, @n_block, @tf_flags and @tag on @dev.
+ *
+ *	RETURNS:
+ *
+ *	0 on success, -ERANGE if the request is too large for @dev,
+ *	-EINVAL if the request is invalid.
+ */
+int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+		    u64 block, u32 n_block, unsigned int tf_flags,
+		    unsigned int tag)
+{
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf->flags |= tf_flags;
+
+	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
+			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
+	    likely(tag != ATA_TAG_INTERNAL)) {
+		/* yay, NCQ */
+		if (!lba_48_ok(block, n_block))
+			return -ERANGE;
+
+		tf->protocol = ATA_PROT_NCQ;
+		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+
+		if (tf->flags & ATA_TFLAG_WRITE)
+			tf->command = ATA_CMD_FPDMA_WRITE;
+		else
+			tf->command = ATA_CMD_FPDMA_READ;
+
+		tf->nsect = tag << 3;
+		tf->hob_feature = (n_block >> 8) & 0xff;
+		tf->feature = n_block & 0xff;
+
+		tf->hob_lbah = (block >> 40) & 0xff;
+		tf->hob_lbam = (block >> 32) & 0xff;
+		tf->hob_lbal = (block >> 24) & 0xff;
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device = 1 << 6;
+		if (tf->flags & ATA_TFLAG_FUA)
+			tf->device |= 1 << 7;
+	} else if (dev->flags & ATA_DFLAG_LBA) {
+		tf->flags |= ATA_TFLAG_LBA;
+
+		if (lba_28_ok(block, n_block)) {
+			/* use LBA28 */
+			tf->device |= (block >> 24) & 0xf;
+		} else if (lba_48_ok(block, n_block)) {
+			if (!(dev->flags & ATA_DFLAG_LBA48))
+				return -ERANGE;
+
+			/* use LBA48 */
+			tf->flags |= ATA_TFLAG_LBA48;
+
+			tf->hob_nsect = (n_block >> 8) & 0xff;
+
+			tf->hob_lbah = (block >> 40) & 0xff;
+			tf->hob_lbam = (block >> 32) & 0xff;
+			tf->hob_lbal = (block >> 24) & 0xff;
+		} else
+			/* request too large even for LBA48 */
+			return -ERANGE;
+
+		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+			return -EINVAL;
+
+		tf->nsect = n_block & 0xff;
+
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device |= ATA_LBA;
+	} else {
+		/* CHS */
+		u32 sect, head, cyl, track;
+
+		/* The request -may- be too large for CHS addressing. */
+		if (!lba_28_ok(block, n_block))
+			return -ERANGE;
+
+		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+			return -EINVAL;
+
+		/* Convert LBA to CHS */
+		track = (u32)block / dev->sectors;
+		cyl   = track / dev->heads;
+		head  = track % dev->heads;
+		sect  = (u32)block % dev->sectors + 1;
+
+		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+			(u32)block, track, cyl, head, sect);
+
+		/* Check whether the converted CHS can fit.
+		   Cylinder: 0-65535
+		   Head: 0-15
+		   Sector: 1-255*/
+		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+			return -ERANGE;
+
+		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+		tf->lbal = sect;
+		tf->lbam = cyl;
+		tf->lbah = cyl >> 8;
+		tf->device |= head;
+	}
+
+	return 0;
+}
+
 /**
  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
  *	@pio_mask: pio_mask
  *	@mwdma_mask: mwdma_mask
@@ -997,13 +1164,13 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
 }
 
 /**
- *	ata_exec_internal - execute libata internal command
+ *	ata_exec_internal_sg - execute libata internal command
  *	@dev: Device to which the command is sent
  *	@tf: Taskfile registers for the command and the result
  *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
- *	@buf: Data buffer of the command
- *	@buflen: Length of data buffer
+ *	@sg: sg list for the data buffer of the command
+ *	@n_elem: Number of sg entries
  *
  *	Executes libata internal command with timeout.  @tf contains
  *	command on entry and result on return.  Timeout and error
@@ -1017,9 +1184,10 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1017 * RETURNS: 1184 * RETURNS:
1018 * Zero on success, AC_ERR_* mask on failure 1185 * Zero on success, AC_ERR_* mask on failure
1019 */ 1186 */
1020unsigned ata_exec_internal(struct ata_device *dev, 1187unsigned ata_exec_internal_sg(struct ata_device *dev,
1021 struct ata_taskfile *tf, const u8 *cdb, 1188 struct ata_taskfile *tf, const u8 *cdb,
1022 int dma_dir, void *buf, unsigned int buflen) 1189 int dma_dir, struct scatterlist *sg,
1190 unsigned int n_elem)
1023{ 1191{
1024 struct ata_port *ap = dev->ap; 1192 struct ata_port *ap = dev->ap;
1025 u8 command = tf->command; 1193 u8 command = tf->command;
@@ -1075,7 +1243,12 @@ unsigned ata_exec_internal(struct ata_device *dev,
1075 qc->flags |= ATA_QCFLAG_RESULT_TF; 1243 qc->flags |= ATA_QCFLAG_RESULT_TF;
1076 qc->dma_dir = dma_dir; 1244 qc->dma_dir = dma_dir;
1077 if (dma_dir != DMA_NONE) { 1245 if (dma_dir != DMA_NONE) {
1078 ata_sg_init_one(qc, buf, buflen); 1246 unsigned int i, buflen = 0;
1247
1248 for (i = 0; i < n_elem; i++)
1249 buflen += sg[i].length;
1250
1251 ata_sg_init(qc, sg, n_elem);
1079 qc->nsect = buflen / ATA_SECT_SIZE; 1252 qc->nsect = buflen / ATA_SECT_SIZE;
1080 } 1253 }
1081 1254
@@ -1159,6 +1332,35 @@ unsigned ata_exec_internal(struct ata_device *dev,
1159} 1332}
1160 1333
1161/** 1334/**
 1335 * ata_exec_internal - execute libata internal command
1336 * @dev: Device to which the command is sent
1337 * @tf: Taskfile registers for the command and the result
1338 * @cdb: CDB for packet command
 1339 * @dma_dir: Data transfer direction of the command
1340 * @buf: Data buffer of the command
1341 * @buflen: Length of data buffer
1342 *
 1343 * Wrapper around ata_exec_internal_sg() which takes a simple
 1344 * buffer instead of an sg list.
1345 *
1346 * LOCKING:
1347 * None. Should be called with kernel context, might sleep.
1348 *
1349 * RETURNS:
1350 * Zero on success, AC_ERR_* mask on failure
1351 */
1352unsigned ata_exec_internal(struct ata_device *dev,
1353 struct ata_taskfile *tf, const u8 *cdb,
1354 int dma_dir, void *buf, unsigned int buflen)
1355{
1356 struct scatterlist sg;
1357
1358 sg_init_one(&sg, buf, buflen);
1359
1360 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, &sg, 1);
1361}
1362
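ata_exec_internal() above is the single-buffer front end: it wraps the caller's flat buffer in a one-entry scatterlist and forwards everything to the sg-aware implementation. A minimal standalone sketch of that adapter pattern, in generic C with hypothetical names rather than the libata or scatterlist API:

#include <stddef.h>
#include <stdio.h>

struct buf_seg {
        const char *base;
        size_t      len;
};

/* "Vectored" worker: operates on an array of segments. */
static size_t total_len_sg(const struct buf_seg *seg, unsigned int n_elem)
{
        size_t sum = 0;
        unsigned int i;

        for (i = 0; i < n_elem; i++)
                sum += seg[i].len;
        return sum;
}

/* Flat-buffer wrapper: build a one-entry segment list and forward,
 * mirroring how ata_exec_internal() feeds ata_exec_internal_sg().
 */
static size_t total_len(const char *buf, size_t buflen)
{
        struct buf_seg seg = { buf, buflen };

        return total_len_sg(&seg, 1);
}

int main(void)
{
        printf("%zu\n", total_len("hello", 5));        /* prints 5 */
        return 0;
}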
1363/**
1162 * ata_do_simple_cmd - execute simple internal command 1364 * ata_do_simple_cmd - execute simple internal command
1163 * @dev: Device to which the command is sent 1365 * @dev: Device to which the command is sent
1164 * @cmd: Opcode to execute 1366 * @cmd: Opcode to execute
@@ -1222,7 +1424,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1222 * ata_dev_read_id - Read ID data from the specified device 1424 * ata_dev_read_id - Read ID data from the specified device
1223 * @dev: target device 1425 * @dev: target device
1224 * @p_class: pointer to class of the target device (may be changed) 1426 * @p_class: pointer to class of the target device (may be changed)
1225 * @post_reset: is this read ID post-reset? 1427 * @flags: ATA_READID_* flags
1226 * @id: buffer to read IDENTIFY data into 1428 * @id: buffer to read IDENTIFY data into
1227 * 1429 *
1228 * Read ID data from the specified device. ATA_CMD_ID_ATA is 1430 * Read ID data from the specified device. ATA_CMD_ID_ATA is
@@ -1237,7 +1439,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1237 * 0 on success, -errno otherwise. 1439 * 0 on success, -errno otherwise.
1238 */ 1440 */
1239int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 1441int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1240 int post_reset, u16 *id) 1442 unsigned int flags, u16 *id)
1241{ 1443{
1242 struct ata_port *ap = dev->ap; 1444 struct ata_port *ap = dev->ap;
1243 unsigned int class = *p_class; 1445 unsigned int class = *p_class;
@@ -1269,10 +1471,17 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1269 } 1471 }
1270 1472
1271 tf.protocol = ATA_PROT_PIO; 1473 tf.protocol = ATA_PROT_PIO;
1474 tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */
1272 1475
1273 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 1476 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1274 id, sizeof(id[0]) * ATA_ID_WORDS); 1477 id, sizeof(id[0]) * ATA_ID_WORDS);
1275 if (err_mask) { 1478 if (err_mask) {
1479 if (err_mask & AC_ERR_NODEV_HINT) {
1480 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1481 ap->id, dev->devno);
1482 return -ENOENT;
1483 }
1484
1276 rc = -EIO; 1485 rc = -EIO;
1277 reason = "I/O error"; 1486 reason = "I/O error";
1278 goto err_out; 1487 goto err_out;
@@ -1292,7 +1501,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1292 goto err_out; 1501 goto err_out;
1293 } 1502 }
1294 1503
1295 if (post_reset && class == ATA_DEV_ATA) { 1504 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1296 /* 1505 /*
1297 * The exact sequence expected by certain pre-ATA4 drives is: 1506 * The exact sequence expected by certain pre-ATA4 drives is:
1298 * SRST RESET 1507 * SRST RESET
@@ -1312,7 +1521,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1312 /* current CHS translation info (id[53-58]) might be 1521 /* current CHS translation info (id[53-58]) might be
1313 * changed. reread the identify device info. 1522 * changed. reread the identify device info.
1314 */ 1523 */
1315 post_reset = 0; 1524 flags &= ~ATA_READID_POSTRESET;
1316 goto retry; 1525 goto retry;
1317 } 1526 }
1318 } 1527 }
@@ -1343,7 +1552,10 @@ static void ata_dev_config_ncq(struct ata_device *dev,
1343 desc[0] = '\0'; 1552 desc[0] = '\0';
1344 return; 1553 return;
1345 } 1554 }
1346 1555 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1556 snprintf(desc, desc_sz, "NCQ (not used)");
1557 return;
1558 }
1347 if (ap->flags & ATA_FLAG_NCQ) { 1559 if (ap->flags & ATA_FLAG_NCQ) {
1348 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 1560 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1349 dev->flags |= ATA_DFLAG_NCQ; 1561 dev->flags |= ATA_DFLAG_NCQ;
@@ -1372,7 +1584,6 @@ static void ata_set_port_max_cmd_len(struct ata_port *ap)
1372/** 1584/**
1373 * ata_dev_configure - Configure the specified ATA/ATAPI device 1585 * ata_dev_configure - Configure the specified ATA/ATAPI device
1374 * @dev: Target device to configure 1586 * @dev: Target device to configure
1375 * @print_info: Enable device info printout
1376 * 1587 *
1377 * Configure @dev according to @dev->id. Generic and low-level 1588 * Configure @dev according to @dev->id. Generic and low-level
1378 * driver specific fixups are also applied. 1589 * driver specific fixups are also applied.
@@ -1383,9 +1594,10 @@ static void ata_set_port_max_cmd_len(struct ata_port *ap)
1383 * RETURNS: 1594 * RETURNS:
1384 * 0 on success, -errno otherwise 1595 * 0 on success, -errno otherwise
1385 */ 1596 */
1386int ata_dev_configure(struct ata_device *dev, int print_info) 1597int ata_dev_configure(struct ata_device *dev)
1387{ 1598{
1388 struct ata_port *ap = dev->ap; 1599 struct ata_port *ap = dev->ap;
1600 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1389 const u16 *id = dev->id; 1601 const u16 *id = dev->id;
1390 unsigned int xfer_mask; 1602 unsigned int xfer_mask;
1391 char revbuf[7]; /* XYZ-99\0 */ 1603 char revbuf[7]; /* XYZ-99\0 */
@@ -1452,6 +1664,10 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1452 if (ata_id_has_lba48(id)) { 1664 if (ata_id_has_lba48(id)) {
1453 dev->flags |= ATA_DFLAG_LBA48; 1665 dev->flags |= ATA_DFLAG_LBA48;
1454 lba_desc = "LBA48"; 1666 lba_desc = "LBA48";
1667
1668 if (dev->n_sectors >= (1UL << 28) &&
1669 ata_id_has_flush_ext(id))
1670 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1455 } 1671 }
1456 1672
1457 /* config NCQ */ 1673 /* config NCQ */
@@ -1528,6 +1744,11 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
1528 cdb_intr_string); 1744 cdb_intr_string);
1529 } 1745 }
1530 1746
1747 /* determine max_sectors */
1748 dev->max_sectors = ATA_MAX_SECTORS;
1749 if (dev->flags & ATA_DFLAG_LBA48)
1750 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1751
1531 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 1752 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1532 /* Let the user know. We don't want to disallow opens for 1753 /* Let the user know. We don't want to disallow opens for
1533 rescue purposes, or in case the vendor is just a blithering 1754 rescue purposes, or in case the vendor is just a blithering
@@ -1629,11 +1850,14 @@ int ata_bus_probe(struct ata_port *ap)
1629 if (!ata_dev_enabled(dev)) 1850 if (!ata_dev_enabled(dev))
1630 continue; 1851 continue;
1631 1852
1632 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id); 1853 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1854 dev->id);
1633 if (rc) 1855 if (rc)
1634 goto fail; 1856 goto fail;
1635 1857
1636 rc = ata_dev_configure(dev, 1); 1858 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1859 rc = ata_dev_configure(dev);
1860 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
1637 if (rc) 1861 if (rc)
1638 goto fail; 1862 goto fail;
1639 } 1863 }
@@ -2151,6 +2375,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2151 2375
2152static int ata_dev_set_mode(struct ata_device *dev) 2376static int ata_dev_set_mode(struct ata_device *dev)
2153{ 2377{
2378 struct ata_eh_context *ehc = &dev->ap->eh_context;
2154 unsigned int err_mask; 2379 unsigned int err_mask;
2155 int rc; 2380 int rc;
2156 2381
@@ -2165,7 +2390,9 @@ static int ata_dev_set_mode(struct ata_device *dev)
2165 return -EIO; 2390 return -EIO;
2166 } 2391 }
2167 2392
2393 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2168 rc = ata_dev_revalidate(dev, 0); 2394 rc = ata_dev_revalidate(dev, 0);
2395 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2169 if (rc) 2396 if (rc)
2170 return rc; 2397 return rc;
2171 2398
@@ -2323,11 +2550,14 @@ static inline void ata_tf_to_host(struct ata_port *ap,
2323 * Sleep until ATA Status register bit BSY clears, 2550 * Sleep until ATA Status register bit BSY clears,
2324 * or a timeout occurs. 2551 * or a timeout occurs.
2325 * 2552 *
2326 * LOCKING: None. 2553 * LOCKING:
2554 * Kernel thread context (may sleep).
2555 *
2556 * RETURNS:
2557 * 0 on success, -errno otherwise.
2327 */ 2558 */
2328 2559int ata_busy_sleep(struct ata_port *ap,
2329unsigned int ata_busy_sleep (struct ata_port *ap, 2560 unsigned long tmout_pat, unsigned long tmout)
2330 unsigned long tmout_pat, unsigned long tmout)
2331{ 2561{
2332 unsigned long timer_start, timeout; 2562 unsigned long timer_start, timeout;
2333 u8 status; 2563 u8 status;
@@ -2335,27 +2565,32 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
2335 status = ata_busy_wait(ap, ATA_BUSY, 300); 2565 status = ata_busy_wait(ap, ATA_BUSY, 300);
2336 timer_start = jiffies; 2566 timer_start = jiffies;
2337 timeout = timer_start + tmout_pat; 2567 timeout = timer_start + tmout_pat;
2338 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2568 while (status != 0xff && (status & ATA_BUSY) &&
2569 time_before(jiffies, timeout)) {
2339 msleep(50); 2570 msleep(50);
2340 status = ata_busy_wait(ap, ATA_BUSY, 3); 2571 status = ata_busy_wait(ap, ATA_BUSY, 3);
2341 } 2572 }
2342 2573
2343 if (status & ATA_BUSY) 2574 if (status != 0xff && (status & ATA_BUSY))
2344 ata_port_printk(ap, KERN_WARNING, 2575 ata_port_printk(ap, KERN_WARNING,
2345 "port is slow to respond, please be patient " 2576 "port is slow to respond, please be patient "
2346 "(Status 0x%x)\n", status); 2577 "(Status 0x%x)\n", status);
2347 2578
2348 timeout = timer_start + tmout; 2579 timeout = timer_start + tmout;
2349 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2580 while (status != 0xff && (status & ATA_BUSY) &&
2581 time_before(jiffies, timeout)) {
2350 msleep(50); 2582 msleep(50);
2351 status = ata_chk_status(ap); 2583 status = ata_chk_status(ap);
2352 } 2584 }
2353 2585
2586 if (status == 0xff)
2587 return -ENODEV;
2588
2354 if (status & ATA_BUSY) { 2589 if (status & ATA_BUSY) {
2355 ata_port_printk(ap, KERN_ERR, "port failed to respond " 2590 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2356 "(%lu secs, Status 0x%x)\n", 2591 "(%lu secs, Status 0x%x)\n",
2357 tmout / HZ, status); 2592 tmout / HZ, status);
2358 return 1; 2593 return -EBUSY;
2359 } 2594 }
2360 2595
2361 return 0; 2596 return 0;
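The reworked ata_busy_sleep() above polls in two phases: a shorter patience window after which it only warns, then a hard timeout after which it gives up, and it now also bails out early when the status reads 0xff (no device present). A standalone sketch of that structure, using a stubbed status register and CLOCK_MONOTONIC in place of jiffies; this is not the kernel function:

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define ST_BUSY 0x80u                /* stand-in for ATA_BUSY */

/* Stubbed status register; real code would read the hardware. */
static uint8_t read_status(void)
{
        static int polls;
        return (++polls < 4) ? ST_BUSY : 0x50;        /* busy, then ready */
}

static long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Two-phase wait: warn after warn_ms, give up after timeout_ms. */
static int busy_sleep(long warn_ms, long timeout_ms)
{
        long start = now_ms();
        uint8_t status = read_status();

        while (status != 0xff && (status & ST_BUSY) &&
               now_ms() - start < warn_ms) {
                usleep(50 * 1000);
                status = read_status();
        }

        if (status != 0xff && (status & ST_BUSY))
                fprintf(stderr, "slow to respond (status 0x%x)\n", status);

        while (status != 0xff && (status & ST_BUSY) &&
               now_ms() - start < timeout_ms) {
                usleep(50 * 1000);
                status = read_status();
        }

        if (status == 0xff)
                return -1;        /* no device */
        if (status & ST_BUSY)
                return -2;        /* timed out */
        return 0;
}

int main(void)
{
        printf("busy_sleep: %d\n", busy_sleep(100, 1000));
        return 0;
}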
@@ -2446,10 +2681,8 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2446 * the bus shows 0xFF because the odd clown forgets the D7 2681 * the bus shows 0xFF because the odd clown forgets the D7
2447 * pulldown resistor. 2682 * pulldown resistor.
2448 */ 2683 */
2449 if (ata_check_status(ap) == 0xFF) { 2684 if (ata_check_status(ap) == 0xFF)
2450 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n"); 2685 return 0;
2451 return AC_ERR_OTHER;
2452 }
2453 2686
2454 ata_bus_post_reset(ap, devmask); 2687 ata_bus_post_reset(ap, devmask);
2455 2688
@@ -2775,9 +3008,9 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2775} 3008}
2776 3009
2777/** 3010/**
2778 * sata_std_hardreset - reset host port via SATA phy reset 3011 * sata_port_hardreset - reset port via SATA phy reset
2779 * @ap: port to reset 3012 * @ap: port to reset
2780 * @class: resulting class of attached device 3013 * @timing: timing parameters { interval, duration, timeout } in msec
2781 * 3014 *
2782 * SATA phy-reset host port using DET bits of SControl register. 3015 * SATA phy-reset host port using DET bits of SControl register.
2783 * 3016 *
@@ -2787,10 +3020,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2787 * RETURNS: 3020 * RETURNS:
2788 * 0 on success, -errno otherwise. 3021 * 0 on success, -errno otherwise.
2789 */ 3022 */
2790int sata_std_hardreset(struct ata_port *ap, unsigned int *class) 3023int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
2791{ 3024{
2792 struct ata_eh_context *ehc = &ap->eh_context;
2793 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2794 u32 scontrol; 3025 u32 scontrol;
2795 int rc; 3026 int rc;
2796 3027
@@ -2803,24 +3034,24 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2803 * and Sil3124. 3034 * and Sil3124.
2804 */ 3035 */
2805 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 3036 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2806 return rc; 3037 goto out;
2807 3038
2808 scontrol = (scontrol & 0x0f0) | 0x304; 3039 scontrol = (scontrol & 0x0f0) | 0x304;
2809 3040
2810 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) 3041 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2811 return rc; 3042 goto out;
2812 3043
2813 sata_set_spd(ap); 3044 sata_set_spd(ap);
2814 } 3045 }
2815 3046
2816 /* issue phy wake/reset */ 3047 /* issue phy wake/reset */
2817 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 3048 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2818 return rc; 3049 goto out;
2819 3050
2820 scontrol = (scontrol & 0x0f0) | 0x301; 3051 scontrol = (scontrol & 0x0f0) | 0x301;
2821 3052
2822 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol))) 3053 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2823 return rc; 3054 goto out;
2824 3055
2825 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3056 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2826 * 10.4.2 says at least 1 ms. 3057 * 10.4.2 says at least 1 ms.
@@ -2828,7 +3059,40 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2828 msleep(1); 3059 msleep(1);
2829 3060
2830 /* bring phy back */ 3061 /* bring phy back */
2831 sata_phy_resume(ap, timing); 3062 rc = sata_phy_resume(ap, timing);
3063 out:
3064 DPRINTK("EXIT, rc=%d\n", rc);
3065 return rc;
3066}
3067
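sata_port_hardreset() above drives the whole reset through SControl: optionally park the phy with DET=0x4 while sata_set_spd() applies a speed limit, start COMRESET by writing DET=0x1 ((scontrol & 0x0f0) | 0x301), hold it for at least 1 ms, then let sata_phy_resume() bring the link back. A standalone sketch of that shape over a hypothetical SCR accessor pair; the release step here is a simplified stand-in for sata_phy_resume(), which in libata also waits for SStatus to settle:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical SCR accessors modelling a single SControl register;
 * they stand in for sata_scr_read()/sata_scr_write().
 */
static uint32_t fake_scontrol = 0x300;

static int scr_read(uint32_t *val)  { *val = fake_scontrol; return 0; }
static int scr_write(uint32_t val)  { fake_scontrol = val;  return 0; }

/* Issue COMRESET via the DET field, then release it.  Keeps the
 * other SControl bits that the 0x0f0 mask preserves above.
 */
static int phy_hardreset(void)
{
        uint32_t scontrol;

        if (scr_read(&scontrol))
                return -1;
        scr_write((scontrol & 0x0f0) | 0x301);  /* DET=1: start COMRESET */

        usleep(1000);                           /* hold >= 1 ms */

        if (scr_read(&scontrol))
                return -1;
        scr_write((scontrol & 0x0f0) | 0x300);  /* DET=0: release, renegotiate */
        return 0;
}

int main(void)
{
        int rc = phy_hardreset();

        printf("hardreset: %d, scontrol now 0x%x\n", rc,
               (unsigned int)fake_scontrol);
        return 0;
}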
3068/**
3069 * sata_std_hardreset - reset host port via SATA phy reset
3070 * @ap: port to reset
3071 * @class: resulting class of attached device
3072 *
3073 * SATA phy-reset host port using DET bits of SControl register,
3074 * wait for !BSY and classify the attached device.
3075 *
3076 * LOCKING:
3077 * Kernel thread context (may sleep)
3078 *
3079 * RETURNS:
3080 * 0 on success, -errno otherwise.
3081 */
3082int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3083{
3084 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3085 int rc;
3086
3087 DPRINTK("ENTER\n");
3088
3089 /* do hardreset */
3090 rc = sata_port_hardreset(ap, timing);
3091 if (rc) {
3092 ata_port_printk(ap, KERN_ERR,
3093 "COMRESET failed (errno=%d)\n", rc);
3094 return rc;
3095 }
2832 3096
2833 /* TODO: phy layer with polling, timeouts, etc. */ 3097 /* TODO: phy layer with polling, timeouts, etc. */
2834 if (ata_port_offline(ap)) { 3098 if (ata_port_offline(ap)) {
@@ -2967,7 +3231,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2967/** 3231/**
2968 * ata_dev_revalidate - Revalidate ATA device 3232 * ata_dev_revalidate - Revalidate ATA device
2969 * @dev: device to revalidate 3233 * @dev: device to revalidate
2970 * @post_reset: is this revalidation after reset? 3234 * @readid_flags: read ID flags
2971 * 3235 *
2972 * Re-read IDENTIFY page and make sure @dev is still attached to 3236 * Re-read IDENTIFY page and make sure @dev is still attached to
2973 * the port. 3237 * the port.
@@ -2978,7 +3242,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2978 * RETURNS: 3242 * RETURNS:
2979 * 0 on success, negative errno otherwise 3243 * 0 on success, negative errno otherwise
2980 */ 3244 */
2981int ata_dev_revalidate(struct ata_device *dev, int post_reset) 3245int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
2982{ 3246{
2983 unsigned int class = dev->class; 3247 unsigned int class = dev->class;
2984 u16 *id = (void *)dev->ap->sector_buf; 3248 u16 *id = (void *)dev->ap->sector_buf;
@@ -2990,7 +3254,7 @@ int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2990 } 3254 }
2991 3255
2992 /* read ID data */ 3256 /* read ID data */
2993 rc = ata_dev_read_id(dev, &class, post_reset, id); 3257 rc = ata_dev_read_id(dev, &class, readid_flags, id);
2994 if (rc) 3258 if (rc)
2995 goto fail; 3259 goto fail;
2996 3260
@@ -3003,7 +3267,7 @@ int ata_dev_revalidate(struct ata_device *dev, int post_reset)
3003 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 3267 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3004 3268
3005 /* configure device according to the new ID */ 3269 /* configure device according to the new ID */
3006 rc = ata_dev_configure(dev, 0); 3270 rc = ata_dev_configure(dev);
3007 if (rc == 0) 3271 if (rc == 0)
3008 return 0; 3272 return 0;
3009 3273
@@ -3012,37 +3276,55 @@ int ata_dev_revalidate(struct ata_device *dev, int post_reset)
3012 return rc; 3276 return rc;
3013} 3277}
3014 3278
3015static const char * const ata_dma_blacklist [] = { 3279struct ata_blacklist_entry {
3016 "WDC AC11000H", NULL, 3280 const char *model_num;
3017 "WDC AC22100H", NULL, 3281 const char *model_rev;
3018 "WDC AC32500H", NULL, 3282 unsigned long horkage;
3019 "WDC AC33100H", NULL, 3283};
3020 "WDC AC31600H", NULL, 3284
3021 "WDC AC32100H", "24.09P07", 3285static const struct ata_blacklist_entry ata_device_blacklist [] = {
3022 "WDC AC23200L", "21.10N21", 3286 /* Devices with DMA related problems under Linux */
3023 "Compaq CRD-8241B", NULL, 3287 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3024 "CRD-8400B", NULL, 3288 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3025 "CRD-8480B", NULL, 3289 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3026 "CRD-8482B", NULL, 3290 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3027 "CRD-84", NULL, 3291 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3028 "SanDisk SDP3B", NULL, 3292 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3029 "SanDisk SDP3B-64", NULL, 3293 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3030 "SANYO CD-ROM CRD", NULL, 3294 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3031 "HITACHI CDR-8", NULL, 3295 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3032 "HITACHI CDR-8335", NULL, 3296 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3033 "HITACHI CDR-8435", NULL, 3297 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3034 "Toshiba CD-ROM XM-6202B", NULL, 3298 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3035 "TOSHIBA CD-ROM XM-1702BC", NULL, 3299 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3036 "CD-532E-A", NULL, 3300 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3037 "E-IDE CD-ROM CR-840", NULL, 3301 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3038 "CD-ROM Drive/F5A", NULL, 3302 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3039 "WPI CDD-820", NULL, 3303 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3040 "SAMSUNG CD-ROM SC-148C", NULL, 3304 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3041 "SAMSUNG CD-ROM SC", NULL, 3305 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3042 "SanDisk SDP3B-64", NULL, 3306 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3043 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL, 3307 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3044 "_NEC DV5800A", NULL, 3308 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3045 "SAMSUNG CD-ROM SN-124", "N001" 3309 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3310 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3311 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3312 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3313 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3314 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3315 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3316 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3317
3318 /* Devices we expect to fail diagnostics */
3319
3320 /* Devices where NCQ should be avoided */
3321 /* NCQ is slow */
3322 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3323
3324 /* Devices with NCQ limits */
3325
3326 /* End Marker */
3327 { }
3046}; 3328};
3047 3329
3048static int ata_strim(char *s, size_t len) 3330static int ata_strim(char *s, size_t len)
@@ -3057,20 +3339,12 @@ static int ata_strim(char *s, size_t len)
3057 return len; 3339 return len;
3058} 3340}
3059 3341
3060static int ata_dma_blacklisted(const struct ata_device *dev) 3342unsigned long ata_device_blacklisted(const struct ata_device *dev)
3061{ 3343{
3062 unsigned char model_num[40]; 3344 unsigned char model_num[40];
3063 unsigned char model_rev[16]; 3345 unsigned char model_rev[16];
3064 unsigned int nlen, rlen; 3346 unsigned int nlen, rlen;
3065 int i; 3347 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3066
3067 /* We don't support polling DMA.
3068 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3069 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3070 */
3071 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3072 (dev->flags & ATA_DFLAG_CDB_INTR))
3073 return 1;
3074 3348
3075 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 3349 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3076 sizeof(model_num)); 3350 sizeof(model_num));
@@ -3079,17 +3353,30 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
3079 nlen = ata_strim(model_num, sizeof(model_num)); 3353 nlen = ata_strim(model_num, sizeof(model_num));
3080 rlen = ata_strim(model_rev, sizeof(model_rev)); 3354 rlen = ata_strim(model_rev, sizeof(model_rev));
3081 3355
3082 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) { 3356 while (ad->model_num) {
3083 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) { 3357 if (!strncmp(ad->model_num, model_num, nlen)) {
3084 if (ata_dma_blacklist[i+1] == NULL) 3358 if (ad->model_rev == NULL)
3085 return 1; 3359 return ad->horkage;
3086 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen)) 3360 if (!strncmp(ad->model_rev, model_rev, rlen))
3087 return 1; 3361 return ad->horkage;
3088 } 3362 }
3363 ad++;
3089 } 3364 }
3090 return 0; 3365 return 0;
3091} 3366}
3092 3367
3368static int ata_dma_blacklisted(const struct ata_device *dev)
3369{
3370 /* We don't support polling DMA.
3371 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3372 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3373 */
3374 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3375 (dev->flags & ATA_DFLAG_CDB_INTR))
3376 return 1;
3377 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3378}
3379
3093/** 3380/**
3094 * ata_dev_xfermask - Compute supported xfermask of the given device 3381 * ata_dev_xfermask - Compute supported xfermask of the given device
3095 * @dev: Device to compute xfermask for 3382 * @dev: Device to compute xfermask for
@@ -3117,6 +3404,13 @@ static void ata_dev_xfermask(struct ata_device *dev)
3117 */ 3404 */
3118 if (ap->cbl == ATA_CBL_PATA40) 3405 if (ap->cbl == ATA_CBL_PATA40)
3119 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 3406 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 3407 /* Apply the drive-side cable rule. Cables reported as unknown or
 3408 * 80-pin host side are checked drive side as well. Cases where a
 3409 * 40-wire cable is known to be safe for 80-wire use are not checked here.
 3410 */
3411 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3412 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3413
3120 3414
3121 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 3415 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3122 dev->mwdma_mask, dev->udma_mask); 3416 dev->mwdma_mask, dev->udma_mask);
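Both cable checks above throttle the link by clearing bits in the packed transfer-mode mask: ~(0xF8 << ATA_SHIFT_UDMA) wipes UDMA3 and above, leaving UDMA2 (UDMA/33) as the fastest mode a 40-wire cable is trusted with. A standalone sketch of the mask arithmetic; the shift value and mode layout are illustrative assumptions, not the libata constants:

#include <stdio.h>

/* Illustrative packing: one bit per mode, UDMA field shifted up.
 * 8 is an assumed shift, not necessarily ATA_SHIFT_UDMA.
 */
#define SHIFT_UDMA      8
#define UDMA_MODE(n)    (1u << (SHIFT_UDMA + (n)))

int main(void)
{
        unsigned int xfer_mask = 0;
        int n;

        /* device claims UDMA0..UDMA5 */
        for (n = 0; n <= 5; n++)
                xfer_mask |= UDMA_MODE(n);
        printf("before: 0x%x\n", xfer_mask);

        /* 40-wire rule: clear UDMA3 and above (0xF8 covers bits 3-7
         * of the UDMA field), keeping UDMA0-2.
         */
        xfer_mask &= ~(0xF8u << SHIFT_UDMA);
        printf("after:  0x%x (UDMA2 is now the ceiling)\n", xfer_mask);
        return 0;
}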
@@ -3234,8 +3528,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
3234 * LOCKING: 3528 * LOCKING:
3235 * spin_lock_irqsave(host lock) 3529 * spin_lock_irqsave(host lock)
3236 */ 3530 */
3237 3531void ata_sg_clean(struct ata_queued_cmd *qc)
3238static void ata_sg_clean(struct ata_queued_cmd *qc)
3239{ 3532{
3240 struct ata_port *ap = qc->ap; 3533 struct ata_port *ap = qc->ap;
3241 struct scatterlist *sg = qc->__sg; 3534 struct scatterlist *sg = qc->__sg;
@@ -3393,19 +3686,15 @@ void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3393 3686
3394void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) 3687void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3395{ 3688{
3396 struct scatterlist *sg;
3397
3398 qc->flags |= ATA_QCFLAG_SINGLE; 3689 qc->flags |= ATA_QCFLAG_SINGLE;
3399 3690
3400 memset(&qc->sgent, 0, sizeof(qc->sgent));
3401 qc->__sg = &qc->sgent; 3691 qc->__sg = &qc->sgent;
3402 qc->n_elem = 1; 3692 qc->n_elem = 1;
3403 qc->orig_n_elem = 1; 3693 qc->orig_n_elem = 1;
3404 qc->buf_virt = buf; 3694 qc->buf_virt = buf;
3405 qc->nbytes = buflen; 3695 qc->nbytes = buflen;
3406 3696
3407 sg = qc->__sg; 3697 sg_init_one(&qc->sgent, buf, buflen);
3408 sg_init_one(sg, buf, buflen);
3409} 3698}
3410 3699
3411/** 3700/**
@@ -4198,8 +4487,12 @@ fsm_start:
4198 /* device stops HSM for abort/error */ 4487 /* device stops HSM for abort/error */
4199 qc->err_mask |= AC_ERR_DEV; 4488 qc->err_mask |= AC_ERR_DEV;
4200 else 4489 else
4201 /* HSM violation. Let EH handle this */ 4490 /* HSM violation. Let EH handle this.
4202 qc->err_mask |= AC_ERR_HSM; 4491 * Phantom devices also trigger this
 4492 * condition. Set the NODEV hint.
4493 */
4494 qc->err_mask |= AC_ERR_HSM |
4495 AC_ERR_NODEV_HINT;
4203 4496
4204 ap->hsm_task_state = HSM_ST_ERR; 4497 ap->hsm_task_state = HSM_ST_ERR;
4205 goto fsm_start; 4498 goto fsm_start;
@@ -4439,6 +4732,14 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
4439 qc->complete_fn(qc); 4732 qc->complete_fn(qc);
4440} 4733}
4441 4734
4735static void fill_result_tf(struct ata_queued_cmd *qc)
4736{
4737 struct ata_port *ap = qc->ap;
4738
4739 ap->ops->tf_read(ap, &qc->result_tf);
4740 qc->result_tf.flags = qc->tf.flags;
4741}
4742
4442/** 4743/**
4443 * ata_qc_complete - Complete an active ATA command 4744 * ata_qc_complete - Complete an active ATA command
4444 * @qc: Command to complete 4745 * @qc: Command to complete
@@ -4476,7 +4777,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4476 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4777 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4477 if (!ata_tag_internal(qc->tag)) { 4778 if (!ata_tag_internal(qc->tag)) {
4478 /* always fill result TF for failed qc */ 4779 /* always fill result TF for failed qc */
4479 ap->ops->tf_read(ap, &qc->result_tf); 4780 fill_result_tf(qc);
4480 ata_qc_schedule_eh(qc); 4781 ata_qc_schedule_eh(qc);
4481 return; 4782 return;
4482 } 4783 }
@@ -4484,7 +4785,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4484 4785
4485 /* read result TF if requested */ 4786 /* read result TF if requested */
4486 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4787 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4487 ap->ops->tf_read(ap, &qc->result_tf); 4788 fill_result_tf(qc);
4488 4789
4489 __ata_qc_complete(qc); 4790 __ata_qc_complete(qc);
4490 } else { 4791 } else {
@@ -4493,7 +4794,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
4493 4794
4494 /* read result TF if failed or requested */ 4795 /* read result TF if failed or requested */
4495 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 4796 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4496 ap->ops->tf_read(ap, &qc->result_tf); 4797 fill_result_tf(qc);
4497 4798
4498 __ata_qc_complete(qc); 4799 __ata_qc_complete(qc);
4499 } 4800 }
@@ -4673,6 +4974,14 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4673 } 4974 }
4674 } 4975 }
4675 4976
4977 /* Some controllers show flaky interrupt behavior after
4978 * setting xfer mode. Use polling instead.
4979 */
4980 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4981 qc->tf.feature == SETFEATURES_XFER) &&
4982 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4983 qc->tf.flags |= ATA_TFLAG_POLLING;
4984
4676 /* select the device */ 4985 /* select the device */
4677 ata_dev_select(ap, qc->dev->devno, 1, 0); 4986 ata_dev_select(ap, qc->dev->devno, 1, 0);
4678 4987
@@ -4781,6 +5090,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4781inline unsigned int ata_host_intr (struct ata_port *ap, 5090inline unsigned int ata_host_intr (struct ata_port *ap,
4782 struct ata_queued_cmd *qc) 5091 struct ata_queued_cmd *qc)
4783{ 5092{
5093 struct ata_eh_info *ehi = &ap->eh_info;
4784 u8 status, host_stat = 0; 5094 u8 status, host_stat = 0;
4785 5095
4786 VPRINTK("ata%u: protocol %d task_state %d\n", 5096 VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -4841,6 +5151,11 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
4841 ap->ops->irq_clear(ap); 5151 ap->ops->irq_clear(ap);
4842 5152
4843 ata_hsm_move(ap, qc, status, 0); 5153 ata_hsm_move(ap, qc, status, 0);
5154
5155 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5156 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5157 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5158
4844 return 1; /* irq handled */ 5159 return 1; /* irq handled */
4845 5160
4846idle_irq: 5161idle_irq:
@@ -5047,7 +5362,7 @@ int ata_flush_cache(struct ata_device *dev)
5047 if (!ata_try_flush_cache(dev)) 5362 if (!ata_try_flush_cache(dev))
5048 return 0; 5363 return 0;
5049 5364
5050 if (ata_id_has_flush_ext(dev->id)) 5365 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5051 cmd = ATA_CMD_FLUSH_EXT; 5366 cmd = ATA_CMD_FLUSH_EXT;
5052 else 5367 else
5053 cmd = ATA_CMD_FLUSH; 5368 cmd = ATA_CMD_FLUSH;
@@ -5519,9 +5834,8 @@ int ata_device_add(const struct ata_probe_ent *ent)
5519 ap->ioaddr.bmdma_addr, 5834 ap->ioaddr.bmdma_addr,
5520 irq_line); 5835 irq_line);
5521 5836
5522 ata_chk_status(ap); 5837 /* freeze port before requesting IRQ */
5523 host->ops->irq_clear(ap); 5838 ata_eh_freeze_port(ap);
5524 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5525 } 5839 }
5526 5840
5527 /* obtain irq, that may be shared between channels */ 5841 /* obtain irq, that may be shared between channels */
@@ -6119,6 +6433,7 @@ EXPORT_SYMBOL_GPL(__sata_phy_reset);
6119EXPORT_SYMBOL_GPL(ata_bus_reset); 6433EXPORT_SYMBOL_GPL(ata_bus_reset);
6120EXPORT_SYMBOL_GPL(ata_std_prereset); 6434EXPORT_SYMBOL_GPL(ata_std_prereset);
6121EXPORT_SYMBOL_GPL(ata_std_softreset); 6435EXPORT_SYMBOL_GPL(ata_std_softreset);
6436EXPORT_SYMBOL_GPL(sata_port_hardreset);
6122EXPORT_SYMBOL_GPL(sata_std_hardreset); 6437EXPORT_SYMBOL_GPL(sata_std_hardreset);
6123EXPORT_SYMBOL_GPL(ata_std_postreset); 6438EXPORT_SYMBOL_GPL(ata_std_postreset);
6124EXPORT_SYMBOL_GPL(ata_dev_classify); 6439EXPORT_SYMBOL_GPL(ata_dev_classify);
@@ -6145,6 +6460,7 @@ EXPORT_SYMBOL_GPL(ata_host_suspend);
6145EXPORT_SYMBOL_GPL(ata_host_resume); 6460EXPORT_SYMBOL_GPL(ata_host_resume);
6146EXPORT_SYMBOL_GPL(ata_id_string); 6461EXPORT_SYMBOL_GPL(ata_id_string);
6147EXPORT_SYMBOL_GPL(ata_id_c_string); 6462EXPORT_SYMBOL_GPL(ata_id_c_string);
6463EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6148EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6464EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6149 6465
6150EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6466EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6b7cc74fd9..08ad44b3e48f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1136,19 +1136,21 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1136 break; 1136 break;
1137 1137
1138 case ATA_DEV_ATAPI: 1138 case ATA_DEV_ATAPI:
1139 tmp = atapi_eh_request_sense(qc->dev, 1139 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1140 qc->scsicmd->sense_buffer); 1140 tmp = atapi_eh_request_sense(qc->dev,
1141 if (!tmp) { 1141 qc->scsicmd->sense_buffer);
1142 /* ATA_QCFLAG_SENSE_VALID is used to tell 1142 if (!tmp) {
1143 * atapi_qc_complete() that sense data is 1143 /* ATA_QCFLAG_SENSE_VALID is used to
1144 * already valid. 1144 * tell atapi_qc_complete() that sense
1145 * 1145 * data is already valid.
1146 * TODO: interpret sense data and set 1146 *
1147 * appropriate err_mask. 1147 * TODO: interpret sense data and set
1148 */ 1148 * appropriate err_mask.
1149 qc->flags |= ATA_QCFLAG_SENSE_VALID; 1149 */
1150 } else 1150 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1151 qc->err_mask |= tmp; 1151 } else
1152 qc->err_mask |= tmp;
1153 }
1152 } 1154 }
1153 1155
1154 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1156 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
@@ -1433,16 +1435,39 @@ static void ata_eh_report(struct ata_port *ap)
1433 } 1435 }
1434 1436
1435 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1437 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1438 static const char *dma_str[] = {
1439 [DMA_BIDIRECTIONAL] = "bidi",
1440 [DMA_TO_DEVICE] = "out",
1441 [DMA_FROM_DEVICE] = "in",
1442 [DMA_NONE] = "",
1443 };
1436 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1444 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1445 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
1446 unsigned int nbytes;
1437 1447
1438 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask) 1448 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1439 continue; 1449 continue;
1440 1450
1441 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x " 1451 nbytes = qc->nbytes;
1442 "Emask 0x%x stat 0x%x err 0x%x (%s)\n", 1452 if (!nbytes)
1443 qc->tag, qc->tf.command, qc->err_mask, 1453 nbytes = qc->nsect << 9;
1444 qc->result_tf.command, qc->result_tf.feature, 1454
1445 ata_err_string(qc->err_mask)); 1455 ata_dev_printk(qc->dev, KERN_ERR,
1456 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
1457 "tag %d cdb 0x%x data %u %s\n "
1458 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
1459 "Emask 0x%x (%s)\n",
1460 cmd->command, cmd->feature, cmd->nsect,
1461 cmd->lbal, cmd->lbam, cmd->lbah,
1462 cmd->hob_feature, cmd->hob_nsect,
1463 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
1464 cmd->device, qc->tag, qc->cdb[0], nbytes,
1465 dma_str[qc->dma_dir],
1466 res->command, res->feature, res->nsect,
1467 res->lbal, res->lbam, res->lbah,
1468 res->hob_feature, res->hob_nsect,
1469 res->hob_lbal, res->hob_lbam, res->hob_lbah,
1470 res->device, qc->err_mask, ata_err_string(qc->err_mask));
1446 } 1471 }
1447} 1472}
1448 1473
@@ -1634,11 +1659,14 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1634 DPRINTK("ENTER\n"); 1659 DPRINTK("ENTER\n");
1635 1660
1636 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1661 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1637 unsigned int action; 1662 unsigned int action, readid_flags = 0;
1638 1663
1639 dev = &ap->device[i]; 1664 dev = &ap->device[i];
1640 action = ata_eh_dev_action(dev); 1665 action = ata_eh_dev_action(dev);
1641 1666
1667 if (ehc->i.flags & ATA_EHI_DID_RESET)
1668 readid_flags |= ATA_READID_POSTRESET;
1669
1642 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) { 1670 if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
1643 if (ata_port_offline(ap)) { 1671 if (ata_port_offline(ap)) {
1644 rc = -EIO; 1672 rc = -EIO;
@@ -1646,13 +1674,17 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1646 } 1674 }
1647 1675
1648 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE); 1676 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1649 rc = ata_dev_revalidate(dev, 1677 rc = ata_dev_revalidate(dev, readid_flags);
1650 ehc->i.flags & ATA_EHI_DID_RESET);
1651 if (rc) 1678 if (rc)
1652 break; 1679 break;
1653 1680
1654 ata_eh_done(ap, dev, ATA_EH_REVALIDATE); 1681 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1655 1682
1683 /* Configuration may have changed, reconfigure
1684 * transfer mode.
1685 */
1686 ehc->i.flags |= ATA_EHI_SETMODE;
1687
1656 /* schedule the scsi_rescan_device() here */ 1688 /* schedule the scsi_rescan_device() here */
1657 queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 1689 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1658 } else if (dev->class == ATA_DEV_UNKNOWN && 1690 } else if (dev->class == ATA_DEV_UNKNOWN &&
@@ -1660,18 +1692,35 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1660 ata_class_enabled(ehc->classes[dev->devno])) { 1692 ata_class_enabled(ehc->classes[dev->devno])) {
1661 dev->class = ehc->classes[dev->devno]; 1693 dev->class = ehc->classes[dev->devno];
1662 1694
1663 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id); 1695 rc = ata_dev_read_id(dev, &dev->class, readid_flags,
1664 if (rc == 0) 1696 dev->id);
1665 rc = ata_dev_configure(dev, 1); 1697 if (rc == 0) {
1698 ehc->i.flags |= ATA_EHI_PRINTINFO;
1699 rc = ata_dev_configure(dev);
1700 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
1701 } else if (rc == -ENOENT) {
1702 /* IDENTIFY was issued to non-existent
1703 * device. No need to reset. Just
1704 * thaw and kill the device.
1705 */
1706 ata_eh_thaw_port(ap);
1707 dev->class = ATA_DEV_UNKNOWN;
1708 rc = 0;
1709 }
1666 1710
1667 if (rc) { 1711 if (rc) {
1668 dev->class = ATA_DEV_UNKNOWN; 1712 dev->class = ATA_DEV_UNKNOWN;
1669 break; 1713 break;
1670 } 1714 }
1671 1715
1672 spin_lock_irqsave(ap->lock, flags); 1716 if (ata_dev_enabled(dev)) {
1673 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1717 spin_lock_irqsave(ap->lock, flags);
1674 spin_unlock_irqrestore(ap->lock, flags); 1718 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1719 spin_unlock_irqrestore(ap->lock, flags);
1720
1721 /* new device discovered, configure xfermode */
1722 ehc->i.flags |= ATA_EHI_SETMODE;
1723 }
1675 } 1724 }
1676 } 1725 }
1677 1726
@@ -1987,13 +2036,14 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1987 if (rc) 2036 if (rc)
1988 goto dev_fail; 2037 goto dev_fail;
1989 2038
1990 /* configure transfer mode if the port has been reset */ 2039 /* configure transfer mode if necessary */
1991 if (ehc->i.flags & ATA_EHI_DID_RESET) { 2040 if (ehc->i.flags & ATA_EHI_SETMODE) {
1992 rc = ata_set_mode(ap, &dev); 2041 rc = ata_set_mode(ap, &dev);
1993 if (rc) { 2042 if (rc) {
1994 down_xfermask = 1; 2043 down_xfermask = 1;
1995 goto dev_fail; 2044 goto dev_fail;
1996 } 2045 }
2046 ehc->i.flags &= ~ATA_EHI_SETMODE;
1997 } 2047 }
1998 2048
1999 /* suspend devices */ 2049 /* suspend devices */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 4c32d93d44b1..664e1377b54c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -671,7 +671,7 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
671} 671}
672 672
673/* 673/*
674 * ata_gen_ata_desc_sense - Generate check condition sense block. 674 * ata_gen_passthru_sense - Generate check condition sense block.
675 * @qc: Command that completed. 675 * @qc: Command that completed.
676 * 676 *
677 * This function is specific to the ATA descriptor format sense 677 * This function is specific to the ATA descriptor format sense
@@ -681,9 +681,9 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
681 * block. Clear sense key, ASC & ASCQ if there is no error. 681 * block. Clear sense key, ASC & ASCQ if there is no error.
682 * 682 *
683 * LOCKING: 683 * LOCKING:
684 * spin_lock_irqsave(host lock) 684 * None.
685 */ 685 */
686void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) 686static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
687{ 687{
688 struct scsi_cmnd *cmd = qc->scsicmd; 688 struct scsi_cmnd *cmd = qc->scsicmd;
689 struct ata_taskfile *tf = &qc->result_tf; 689 struct ata_taskfile *tf = &qc->result_tf;
@@ -713,12 +713,9 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
713 713
714 desc[0] = 0x09; 714 desc[0] = 0x09;
715 715
716 /* 716 /* set length of additional sense data */
717 * Set length of additional sense data. 717 sb[7] = 14;
718 * Since we only populate descriptor 0, the total 718 desc[1] = 12;
719 * length is the same (fixed) length as descriptor 0.
720 */
721 desc[1] = sb[7] = 14;
722 719
723 /* 720 /*
724 * Copy registers into sense buffer. 721 * Copy registers into sense buffer.
@@ -746,56 +743,56 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
746} 743}
747 744
748/** 745/**
749 * ata_gen_fixed_sense - generate a SCSI fixed sense block 746 * ata_gen_ata_sense - generate a SCSI fixed sense block
750 * @qc: Command that we are erroring out 747 * @qc: Command that we are erroring out
751 * 748 *
752 * Leverage ata_to_sense_error() to give us the codes. Fit our 749 * Generate sense block for a failed ATA command @qc. Descriptor
753 * LBA in here if there's room. 750 * format is used to accommodate LBA48 block address.
754 * 751 *
755 * LOCKING: 752 * LOCKING:
756 * inherited from caller 753 * None.
757 */ 754 */
758void ata_gen_fixed_sense(struct ata_queued_cmd *qc) 755static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
759{ 756{
757 struct ata_device *dev = qc->dev;
760 struct scsi_cmnd *cmd = qc->scsicmd; 758 struct scsi_cmnd *cmd = qc->scsicmd;
761 struct ata_taskfile *tf = &qc->result_tf; 759 struct ata_taskfile *tf = &qc->result_tf;
762 unsigned char *sb = cmd->sense_buffer; 760 unsigned char *sb = cmd->sense_buffer;
761 unsigned char *desc = sb + 8;
763 int verbose = qc->ap->ops->error_handler == NULL; 762 int verbose = qc->ap->ops->error_handler == NULL;
763 u64 block;
764 764
765 memset(sb, 0, SCSI_SENSE_BUFFERSIZE); 765 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
766 766
767 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 767 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
768 768
769 /* 769 /* sense data is current and format is descriptor */
770 * Use ata_to_sense_error() to map status register bits 770 sb[0] = 0x72;
771
772 /* Use ata_to_sense_error() to map status register bits
771 * onto sense key, asc & ascq. 773 * onto sense key, asc & ascq.
772 */ 774 */
773 if (qc->err_mask || 775 if (qc->err_mask ||
774 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { 776 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
775 ata_to_sense_error(qc->ap->id, tf->command, tf->feature, 777 ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
776 &sb[2], &sb[12], &sb[13], verbose); 778 &sb[1], &sb[2], &sb[3], verbose);
777 sb[2] &= 0x0f; 779 sb[1] &= 0x0f;
778 } 780 }
779 781
780 sb[0] = 0x70; 782 block = ata_tf_read_block(&qc->result_tf, dev);
781 sb[7] = 0x0a;
782
783 if (tf->flags & ATA_TFLAG_LBA48) {
784 /* TODO: find solution for LBA48 descriptors */
785 }
786 783
787 else if (tf->flags & ATA_TFLAG_LBA) { 784 /* information sense data descriptor */
788 /* A small (28b) LBA will fit in the 32b info field */ 785 sb[7] = 12;
789 sb[0] |= 0x80; /* set valid bit */ 786 desc[0] = 0x00;
790 sb[3] = tf->device & 0x0f; 787 desc[1] = 10;
791 sb[4] = tf->lbah;
792 sb[5] = tf->lbam;
793 sb[6] = tf->lbal;
794 }
795 788
796 else { 789 desc[2] |= 0x80; /* valid */
797 /* TODO: C/H/S */ 790 desc[6] = block >> 40;
798 } 791 desc[7] = block >> 32;
792 desc[8] = block >> 24;
793 desc[9] = block >> 16;
794 desc[10] = block >> 8;
795 desc[11] = block;
799} 796}
800 797
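ata_gen_ata_sense() above emits descriptor-format sense (response code 0x72): sense key, ASC and ASCQ land in bytes 1-3, byte 7 carries the additional sense length, and an information descriptor (type 0x00, length 10, VALID bit set) holds the failed LBA big-endian in its last six bytes, which is how a 48-bit address fits. A standalone sketch that lays out such a buffer; the offsets follow the code above, it is not a full SPC implementation:

#include <stdint.h>
#include <stdio.h>

/* Build a minimal descriptor-format sense block with one information
 * descriptor holding a 48-bit LBA, mirroring the layout above.
 */
static void fill_sense(uint8_t *sb, uint8_t sk, uint8_t asc, uint8_t ascq,
                       uint64_t lba)
{
        uint8_t *desc = sb + 8;
        int i;

        for (i = 0; i < 8 + 12; i++)
                sb[i] = 0;

        sb[0] = 0x72;           /* current errors, descriptor format */
        sb[1] = sk & 0x0f;      /* sense key */
        sb[2] = asc;
        sb[3] = ascq;
        sb[7] = 12;             /* additional length: one descriptor */

        desc[0] = 0x00;         /* information descriptor */
        desc[1] = 10;           /* descriptor length - 2 */
        desc[2] = 0x80;         /* VALID */
        desc[6]  = lba >> 40;   /* big-endian 48-bit LBA */
        desc[7]  = lba >> 32;
        desc[8]  = lba >> 24;
        desc[9]  = lba >> 16;
        desc[10] = lba >> 8;
        desc[11] = lba;
}

int main(void)
{
        uint8_t sb[32];
        int i;

        /* sense key 3 (MEDIUM ERROR), illustrative ASC/ASCQ */
        fill_sense(sb, 0x03, 0x11, 0x04, 0x123456789aULL);
        for (i = 0; i < 20; i++)
                printf("%02x ", sb[i]);
        printf("\n");
        return 0;
}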
801static void ata_scsi_sdev_config(struct scsi_device *sdev) 798static void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -807,23 +804,10 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
807static void ata_scsi_dev_config(struct scsi_device *sdev, 804static void ata_scsi_dev_config(struct scsi_device *sdev,
808 struct ata_device *dev) 805 struct ata_device *dev)
809{ 806{
810 unsigned int max_sectors; 807 /* configure max sectors */
811 808 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
812 /* TODO: 2048 is an arbitrary number, not the
813 * hardware maximum. This should be increased to
814 * 65534 when Jens Axboe's patch for dynamically
815 * determining max_sectors is merged.
816 */
817 max_sectors = ATA_MAX_SECTORS;
818 if (dev->flags & ATA_DFLAG_LBA48)
819 max_sectors = ATA_MAX_SECTORS_LBA48;
820 if (dev->max_sectors)
821 max_sectors = dev->max_sectors;
822 809
823 blk_queue_max_sectors(sdev->request_queue, max_sectors); 810 /* SATA DMA transfers must be multiples of 4 bytes, so
824
825 /*
826 * SATA DMA transfers must be multiples of 4 bytes, so 811 * we need to pad ATAPI transfers using an extra sg.
827 * we need to pad ATAPI transfers using an extra sg. 811 * we need to pad ATAPI transfers using an extra sg.
828 * Decrement max hw segments accordingly. 812 * Decrement max hw segments accordingly.
829 */ 813 */
@@ -1040,8 +1024,7 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scs
1040 tf->flags |= ATA_TFLAG_DEVICE; 1024 tf->flags |= ATA_TFLAG_DEVICE;
1041 tf->protocol = ATA_PROT_NODATA; 1025 tf->protocol = ATA_PROT_NODATA;
1042 1026
1043 if ((qc->dev->flags & ATA_DFLAG_LBA48) && 1027 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
1044 (ata_id_has_flush_ext(qc->dev->id)))
1045 tf->command = ATA_CMD_FLUSH_EXT; 1028 tf->command = ATA_CMD_FLUSH_EXT;
1046 else 1029 else
1047 tf->command = ATA_CMD_FLUSH; 1030 tf->command = ATA_CMD_FLUSH;
@@ -1282,17 +1265,14 @@ nothing_to_do:
1282 1265
1283static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) 1266static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
1284{ 1267{
1285 struct ata_taskfile *tf = &qc->tf; 1268 unsigned int tf_flags = 0;
1286 struct ata_device *dev = qc->dev;
1287 u64 block; 1269 u64 block;
1288 u32 n_block; 1270 u32 n_block;
1289 1271 int rc;
1290 qc->flags |= ATA_QCFLAG_IO;
1291 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1292 1272
1293 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 || 1273 if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
1294 scsicmd[0] == WRITE_16) 1274 scsicmd[0] == WRITE_16)
1295 tf->flags |= ATA_TFLAG_WRITE; 1275 tf_flags |= ATA_TFLAG_WRITE;
1296 1276
1297 /* Calculate the SCSI LBA, transfer length and FUA. */ 1277 /* Calculate the SCSI LBA, transfer length and FUA. */
1298 switch (scsicmd[0]) { 1278 switch (scsicmd[0]) {
@@ -1300,7 +1280,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1300 case WRITE_10: 1280 case WRITE_10:
1301 scsi_10_lba_len(scsicmd, &block, &n_block); 1281 scsi_10_lba_len(scsicmd, &block, &n_block);
1302 if (unlikely(scsicmd[1] & (1 << 3))) 1282 if (unlikely(scsicmd[1] & (1 << 3)))
1303 tf->flags |= ATA_TFLAG_FUA; 1283 tf_flags |= ATA_TFLAG_FUA;
1304 break; 1284 break;
1305 case READ_6: 1285 case READ_6:
1306 case WRITE_6: 1286 case WRITE_6:
@@ -1316,7 +1296,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1316 case WRITE_16: 1296 case WRITE_16:
1317 scsi_16_lba_len(scsicmd, &block, &n_block); 1297 scsi_16_lba_len(scsicmd, &block, &n_block);
1318 if (unlikely(scsicmd[1] & (1 << 3))) 1298 if (unlikely(scsicmd[1] & (1 << 3)))
1319 tf->flags |= ATA_TFLAG_FUA; 1299 tf_flags |= ATA_TFLAG_FUA;
1320 break; 1300 break;
1321 default: 1301 default:
1322 DPRINTK("no-byte command\n"); 1302 DPRINTK("no-byte command\n");
@@ -1334,106 +1314,17 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1334 */ 1314 */
1335 goto nothing_to_do; 1315 goto nothing_to_do;
1336 1316
1337 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF | 1317 qc->flags |= ATA_QCFLAG_IO;
1338 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) { 1318 qc->nsect = n_block;
1339 /* yay, NCQ */
1340 if (!lba_48_ok(block, n_block))
1341 goto out_of_range;
1342
1343 tf->protocol = ATA_PROT_NCQ;
1344 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1345
1346 if (tf->flags & ATA_TFLAG_WRITE)
1347 tf->command = ATA_CMD_FPDMA_WRITE;
1348 else
1349 tf->command = ATA_CMD_FPDMA_READ;
1350
1351 qc->nsect = n_block;
1352
1353 tf->nsect = qc->tag << 3;
1354 tf->hob_feature = (n_block >> 8) & 0xff;
1355 tf->feature = n_block & 0xff;
1356
1357 tf->hob_lbah = (block >> 40) & 0xff;
1358 tf->hob_lbam = (block >> 32) & 0xff;
1359 tf->hob_lbal = (block >> 24) & 0xff;
1360 tf->lbah = (block >> 16) & 0xff;
1361 tf->lbam = (block >> 8) & 0xff;
1362 tf->lbal = block & 0xff;
1363
1364 tf->device = 1 << 6;
1365 if (tf->flags & ATA_TFLAG_FUA)
1366 tf->device |= 1 << 7;
1367 } else if (dev->flags & ATA_DFLAG_LBA) {
1368 tf->flags |= ATA_TFLAG_LBA;
1369
1370 if (lba_28_ok(block, n_block)) {
1371 /* use LBA28 */
1372 tf->device |= (block >> 24) & 0xf;
1373 } else if (lba_48_ok(block, n_block)) {
1374 if (!(dev->flags & ATA_DFLAG_LBA48))
1375 goto out_of_range;
1376
1377 /* use LBA48 */
1378 tf->flags |= ATA_TFLAG_LBA48;
1379
1380 tf->hob_nsect = (n_block >> 8) & 0xff;
1381
1382 tf->hob_lbah = (block >> 40) & 0xff;
1383 tf->hob_lbam = (block >> 32) & 0xff;
1384 tf->hob_lbal = (block >> 24) & 0xff;
1385 } else
1386 /* request too large even for LBA48 */
1387 goto out_of_range;
1388
1389 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1390 goto invalid_fld;
1391
1392 qc->nsect = n_block;
1393 tf->nsect = n_block & 0xff;
1394
1395 tf->lbah = (block >> 16) & 0xff;
1396 tf->lbam = (block >> 8) & 0xff;
1397 tf->lbal = block & 0xff;
1398
1399 tf->device |= ATA_LBA;
1400 } else {
1401 /* CHS */
1402 u32 sect, head, cyl, track;
1403
1404 /* The request -may- be too large for CHS addressing. */
1405 if (!lba_28_ok(block, n_block))
1406 goto out_of_range;
1407
1408 if (unlikely(ata_rwcmd_protocol(qc) < 0))
1409 goto invalid_fld;
1410
1411 /* Convert LBA to CHS */
1412 track = (u32)block / dev->sectors;
1413 cyl = track / dev->heads;
1414 head = track % dev->heads;
1415 sect = (u32)block % dev->sectors + 1;
1416
1417 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1418 (u32)block, track, cyl, head, sect);
1419
1420 /* Check whether the converted CHS can fit.
1421 Cylinder: 0-65535
1422 Head: 0-15
1423 Sector: 1-255 */
1424 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1425 goto out_of_range;
1426
1427 qc->nsect = n_block;
1428 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
1429 tf->lbal = sect;
1430 tf->lbam = cyl;
1431 tf->lbah = cyl >> 8;
1432 tf->device |= head;
1433 }
1434 1319
1435 return 0; 1320 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
1321 qc->tag);
1322 if (likely(rc == 0))
1323 return 0;
1436 1324
1325 if (rc == -ERANGE)
1326 goto out_of_range;
1327 /* treat all other errors as -EINVAL, fall through */
1437invalid_fld: 1328invalid_fld:
1438 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); 1329 ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
1439 /* "Invalid field in cbd" */ 1330 /* "Invalid field in cbd" */
@@ -1477,7 +1368,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1477 */ 1368 */
1478 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1369 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1479 ((cdb[2] & 0x20) || need_sense)) { 1370 ((cdb[2] & 0x20) || need_sense)) {
1480 ata_gen_ata_desc_sense(qc); 1371 ata_gen_passthru_sense(qc);
1481 } else { 1372 } else {
1482 if (!need_sense) { 1373 if (!need_sense) {
1483 cmd->result = SAM_STAT_GOOD; 1374 cmd->result = SAM_STAT_GOOD;
@@ -1488,7 +1379,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1488 * good for smaller LBA (and maybe CHS?) 1379 * good for smaller LBA (and maybe CHS?)
1489 * devices. 1380 * devices.
1490 */ 1381 */
1491 ata_gen_fixed_sense(qc); 1382 ata_gen_ata_sense(qc);
1492 } 1383 }
1493 } 1384 }
1494 1385
@@ -1715,6 +1606,22 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1715} 1606}
1716 1607
1717/** 1608/**
1609 * ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer
1610 * @idx: byte index into SCSI response buffer
1611 * @val: value to set
1612 *
 1613 * To be used by SCSI command simulator functions. This macro
 1614 * expects two local variables, u8 *rbuf and unsigned int buflen,
 1615 * to be in scope.
1616 *
1617 * LOCKING:
1618 * None.
1619 */
1620#define ATA_SCSI_RBUF_SET(idx, val) do { \
1621 if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \
1622 } while (0)
1623
1624/**
1718 * ata_scsiop_inq_std - Simulate INQUIRY command 1625 * ata_scsiop_inq_std - Simulate INQUIRY command
1719 * @args: device IDENTIFY data / SCSI command of interest. 1626 * @args: device IDENTIFY data / SCSI command of interest.
1720 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1627 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
@@ -2173,67 +2080,42 @@ saving_not_supp:
2173 * Simulate READ CAPACITY commands. 2080 * Simulate READ CAPACITY commands.
2174 * 2081 *
2175 * LOCKING: 2082 * LOCKING:
2176 * spin_lock_irqsave(host lock) 2083 * None.
2177 */ 2084 */
2178
2179unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2085unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
2180 unsigned int buflen) 2086 unsigned int buflen)
2181{ 2087{
2182 u64 n_sectors; 2088 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
2183 u32 tmp;
2184 2089
2185 VPRINTK("ENTER\n"); 2090 VPRINTK("ENTER\n");
2186 2091
2187 if (ata_id_has_lba(args->id)) {
2188 if (ata_id_has_lba48(args->id))
2189 n_sectors = ata_id_u64(args->id, 100);
2190 else
2191 n_sectors = ata_id_u32(args->id, 60);
2192 } else {
2193 /* CHS default translation */
2194 n_sectors = args->id[1] * args->id[3] * args->id[6];
2195
2196 if (ata_id_current_chs_valid(args->id))
2197 /* CHS current translation */
2198 n_sectors = ata_id_u32(args->id, 57);
2199 }
2200
2201 n_sectors--; /* ATA TotalUserSectors - 1 */
2202
2203 if (args->cmd->cmnd[0] == READ_CAPACITY) { 2092 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2204 if( n_sectors >= 0xffffffffULL ) 2093 if (last_lba >= 0xffffffffULL)
2205 tmp = 0xffffffff ; /* Return max count on overflow */ 2094 last_lba = 0xffffffff;
2206 else
2207 tmp = n_sectors ;
2208 2095
2209 /* sector count, 32-bit */ 2096 /* sector count, 32-bit */
2210 rbuf[0] = tmp >> (8 * 3); 2097 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3));
2211 rbuf[1] = tmp >> (8 * 2); 2098 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2));
2212 rbuf[2] = tmp >> (8 * 1); 2099 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1));
2213 rbuf[3] = tmp; 2100 ATA_SCSI_RBUF_SET(3, last_lba);
2214 2101
2215 /* sector size */ 2102 /* sector size */
2216 tmp = ATA_SECT_SIZE; 2103 ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8);
2217 rbuf[6] = tmp >> 8; 2104 ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE);
2218 rbuf[7] = tmp;
2219
2220 } else { 2105 } else {
2221 /* sector count, 64-bit */ 2106 /* sector count, 64-bit */
2222 tmp = n_sectors >> (8 * 4); 2107 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7));
2223 rbuf[2] = tmp >> (8 * 3); 2108 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6));
2224 rbuf[3] = tmp >> (8 * 2); 2109 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5));
2225 rbuf[4] = tmp >> (8 * 1); 2110 ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4));
2226 rbuf[5] = tmp; 2111 ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3));
2227 tmp = n_sectors; 2112 ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2));
2228 rbuf[6] = tmp >> (8 * 3); 2113 ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1));
2229 rbuf[7] = tmp >> (8 * 2); 2114 ATA_SCSI_RBUF_SET(7, last_lba);
2230 rbuf[8] = tmp >> (8 * 1);
2231 rbuf[9] = tmp;
2232 2115
2233 /* sector size */ 2116 /* sector size */
2234 tmp = ATA_SECT_SIZE; 2117 ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8);
2235 rbuf[12] = tmp >> 8; 2118 ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE);
2236 rbuf[13] = tmp;
2237 } 2119 }
2238 2120
2239 return 0; 2121 return 0;
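The rewritten READ CAPACITY simulation above is just big-endian serialization through that bounds-checked helper: READ CAPACITY (10) gets a 4-byte last LBA (clamped to 0xffffffff when it does not fit) followed by a 4-byte block length, and the 16-byte variant gets an 8-byte last LBA with the block length ending at byte 11. A standalone sketch of the packing in plain C:

#include <stdint.h>
#include <stdio.h>

/* Store the low 'nbytes' of 'val' big-endian at buf[off..]. */
static void put_be(uint8_t *buf, int off, int nbytes, uint64_t val)
{
        int i;

        for (i = nbytes - 1; i >= 0; i--) {
                buf[off + i] = (uint8_t)val;
                val >>= 8;
        }
}

int main(void)
{
        uint64_t last_lba = 0x123456789aULL;        /* n_sectors - 1 */
        uint8_t rbuf[32] = { 0 };
        int i;

        /* READ CAPACITY (10): clamp, then 4-byte LBA + 4-byte block len */
        put_be(rbuf, 0, 4,
               last_lba > 0xffffffffULL ? 0xffffffffULL : last_lba);
        put_be(rbuf, 4, 4, 512);

        /* READ CAPACITY (16): 8-byte LBA + 4-byte block length */
        put_be(rbuf + 12, 0, 8, last_lba);
        put_be(rbuf + 12, 8, 4, 512);

        for (i = 0; i < 24; i++)
                printf("%02x%c", rbuf[i], i == 11 ? '\n' : ' ');
        printf("\n");
        return 0;
}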
@@ -2319,7 +2201,7 @@ static void atapi_sense_complete(struct ata_queued_cmd *qc)
2319 * sense descriptors, since that's only 2201 * sense descriptors, since that's only
2320 * correct for ATA, not ATAPI 2202 * correct for ATA, not ATAPI
2321 */ 2203 */
2322 ata_gen_ata_desc_sense(qc); 2204 ata_gen_passthru_sense(qc);
2323 } 2205 }
2324 2206
2325 qc->scsidone(qc->scsicmd); 2207 qc->scsidone(qc->scsicmd);
@@ -2394,7 +2276,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2394 * sense descriptors, since that's only 2276 * sense descriptors, since that's only
2395 * correct for ATA, not ATAPI 2277 * correct for ATA, not ATAPI
2396 */ 2278 */
2397 ata_gen_ata_desc_sense(qc); 2279 ata_gen_passthru_sense(qc);
2398 } 2280 }
2399 2281
2400 /* SCSI EH automatically locks door if sdev->locked is 2282 /* SCSI EH automatically locks door if sdev->locked is
@@ -2427,7 +2309,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2427 * a sense descriptor, since that's only 2309 * a sense descriptor, since that's only
2428 * correct for ATA, not ATAPI 2310 * correct for ATA, not ATAPI
2429 */ 2311 */
2430 ata_gen_ata_desc_sense(qc); 2312 ata_gen_passthru_sense(qc);
2431 } else { 2313 } else {
2432 u8 *scsicmd = cmd->cmnd; 2314 u8 *scsicmd = cmd->cmnd;
2433 2315
@@ -3183,10 +3065,12 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3183 rc = -EINVAL; 3065 rc = -EINVAL;
3184 } 3066 }
3185 3067
3186 if (rc == 0) 3068 if (rc == 0) {
3187 ata_port_schedule_eh(ap); 3069 ata_port_schedule_eh(ap);
3188 3070 spin_unlock_irqrestore(ap->lock, flags);
3189 spin_unlock_irqrestore(ap->lock, flags); 3071 ata_port_wait_eh(ap);
3072 } else
3073 spin_unlock_irqrestore(ap->lock, flags);
3190 3074
3191 return rc; 3075 return rc;
3192} 3076}
@@ -3207,15 +3091,27 @@ void ata_scsi_dev_rescan(struct work_struct *work)
3207{ 3091{
3208 struct ata_port *ap = 3092 struct ata_port *ap =
3209 container_of(work, struct ata_port, scsi_rescan_task); 3093 container_of(work, struct ata_port, scsi_rescan_task);
3210 struct ata_device *dev; 3094 unsigned long flags;
3211 unsigned int i; 3095 unsigned int i;
3212 3096
3097 spin_lock_irqsave(ap->lock, flags);
3098
3213 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3099 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3214 dev = &ap->device[i]; 3100 struct ata_device *dev = &ap->device[i];
3101 struct scsi_device *sdev = dev->sdev;
3215 3102
3216 if (ata_dev_enabled(dev) && dev->sdev) 3103 if (!ata_dev_enabled(dev) || !sdev)
3217 scsi_rescan_device(&(dev->sdev->sdev_gendev)); 3104 continue;
3105 if (scsi_device_get(sdev))
3106 continue;
3107
3108 spin_unlock_irqrestore(ap->lock, flags);
3109 scsi_rescan_device(&(sdev->sdev_gendev));
3110 scsi_device_put(sdev);
3111 spin_lock_irqsave(ap->lock, flags);
3218 } 3112 }
3113
3114 spin_unlock_irqrestore(ap->lock, flags);
3219} 3115}
3220 3116
3221/** 3117/**
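
The rescan rework above is an instance of a common locking pattern: while the lock is held, take a reference so the object cannot disappear, drop the lock around the call that may sleep, then re-acquire the lock and drop the reference. A rough standalone sketch of that shape using pthreads, with all names invented; the kernel code uses ap->lock with scsi_device_get()/scsi_device_put(), and iterates a fixed-size device array so the loop index stays valid across the unlock.

#include <pthread.h>
#include <stddef.h>

struct object {
	int refs;			/* protected by list_lock in this sketch */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a call that may block and must not run under list_lock. */
static void slow_rescan(struct object *obj) { (void)obj; }

static void rescan_all(struct object *objs[], size_t n)
{
	size_t i;

	pthread_mutex_lock(&list_lock);
	for (i = 0; i < n; i++) {
		struct object *obj = objs[i];

		if (!obj)
			continue;
		obj->refs++;		/* pin: stays valid while the lock is dropped */

		pthread_mutex_unlock(&list_lock);
		slow_rescan(obj);	/* may sleep; no lock held here */
		pthread_mutex_lock(&list_lock);

		obj->refs--;		/* unpin; a real implementation frees at zero */
	}
	pthread_mutex_unlock(&list_lock);
}
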
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 7645f2b30ccf..10ee22ae5c15 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -39,6 +39,35 @@
39#include "libata.h" 39#include "libata.h"
40 40
41/** 41/**
42 * ata_irq_on - Enable interrupts on a port.
43 * @ap: Port on which interrupts are enabled.
44 *
45 * Enable interrupts on a legacy IDE device using MMIO or PIO,
46 * wait for idle, clear any pending interrupts.
47 *
48 * LOCKING:
49 * Inherited from caller.
50 */
51u8 ata_irq_on(struct ata_port *ap)
52{
53 struct ata_ioports *ioaddr = &ap->ioaddr;
54 u8 tmp;
55
56 ap->ctl &= ~ATA_NIEN;
57 ap->last_ctl = ap->ctl;
58
59 if (ap->flags & ATA_FLAG_MMIO)
60 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
61 else
62 outb(ap->ctl, ioaddr->ctl_addr);
63 tmp = ata_wait_idle(ap);
64
65 ap->ops->irq_clear(ap);
66
67 return tmp;
68}
69
70/**
42 * ata_tf_load_pio - send taskfile registers to host controller 71 * ata_tf_load_pio - send taskfile registers to host controller
43 * @ap: Port to which output is sent 72 * @ap: Port to which output is sent
44 * @tf: ATA taskfile register set 73 * @tf: ATA taskfile register set
@@ -671,6 +700,14 @@ void ata_bmdma_freeze(struct ata_port *ap)
671 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr); 700 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
672 else 701 else
673 outb(ap->ctl, ioaddr->ctl_addr); 702 outb(ap->ctl, ioaddr->ctl_addr);
703
704 /* Under certain circumstances, some controllers raise IRQ on
705 * ATA_NIEN manipulation. Also, many controllers fail to mask
706 * previously pending IRQ on ATA_NIEN assertion. Clear it.
707 */
708 ata_chk_status(ap);
709
710 ap->ops->irq_clear(ap);
674} 711}
675 712
676/** 713/**
@@ -714,7 +751,6 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
714 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 751 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
715 ata_postreset_fn_t postreset) 752 ata_postreset_fn_t postreset)
716{ 753{
717 struct ata_eh_context *ehc = &ap->eh_context;
718 struct ata_queued_cmd *qc; 754 struct ata_queued_cmd *qc;
719 unsigned long flags; 755 unsigned long flags;
720 int thaw = 0; 756 int thaw = 0;
@@ -732,9 +768,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
732 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) { 768 qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
733 u8 host_stat; 769 u8 host_stat;
734 770
735 host_stat = ata_bmdma_status(ap); 771 host_stat = ap->ops->bmdma_status(ap);
736
737 ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
738 772
739 /* BMDMA controllers indicate host bus error by 773 /* BMDMA controllers indicate host bus error by
740 * setting DMA_ERR bit and timing out. As it wasn't 774 * setting DMA_ERR bit and timing out. As it wasn't
@@ -877,6 +911,7 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
877 return NULL; 911 return NULL;
878 912
879 probe_ent->n_ports = 2; 913 probe_ent->n_ports = 2;
914 probe_ent->irq_flags = IRQF_SHARED;
880 915
881 if (port_mask & ATA_PORT_PRIMARY) { 916 if (port_mask & ATA_PORT_PRIMARY) {
882 probe_ent->irq = ATA_PRIMARY_IRQ; 917 probe_ent->irq = ATA_PRIMARY_IRQ;
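
Two details in the libata-sff.c hunks above are easy to miss: the legacy Device Control register masks INTRQ through its nIEN bit, and reading the Status register acknowledges a pending interrupt, which is why ata_bmdma_freeze() now does a throwaway ata_chk_status() after asserting ATA_NIEN. A schematic userspace sketch of the two sequences; io_write8()/io_read8() are invented placeholders for outb()/inb() or the MMIO equivalents, and the function names are not libata API.

#include <stdint.h>

#define ATA_NIEN	(1 << 1)	/* Device Control: 1 = interrupts masked */

/* Placeholder register accessors, standing in for outb()/inb() or MMIO. */
static void io_write8(uint16_t port, uint8_t val) { (void)port; (void)val; }
static uint8_t io_read8(uint16_t port) { (void)port; return 0; }

/* Unmask interrupts: clear nIEN, then flush anything already pending. */
static uint8_t port_irq_on(uint16_t ctl_port, uint16_t status_port, uint8_t ctl)
{
	ctl &= ~ATA_NIEN;
	io_write8(ctl_port, ctl);
	return io_read8(status_port);	/* reading Status acks a pending INTRQ */
}

/* Freeze: mask interrupts, then clear whatever the write itself raised. */
static void port_freeze(uint16_t ctl_port, uint16_t status_port, uint8_t ctl)
{
	io_write8(ctl_port, ctl | ATA_NIEN);
	(void)io_read8(status_port);	/* some controllers latch an IRQ here */
}
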
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 7e0f3aff873d..81ae41d5f23f 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -39,26 +39,39 @@ struct ata_scsi_args {
39}; 39};
40 40
41/* libata-core.c */ 41/* libata-core.c */
42enum {
43 /* flags for ata_dev_read_id() */
44 ATA_READID_POSTRESET = (1 << 0), /* reading ID after reset */
45};
46
42extern struct workqueue_struct *ata_aux_wq; 47extern struct workqueue_struct *ata_aux_wq;
43extern int atapi_enabled; 48extern int atapi_enabled;
44extern int atapi_dmadir; 49extern int atapi_dmadir;
45extern int libata_fua; 50extern int libata_fua;
46extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); 51extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
47extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); 52extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
53 u64 block, u32 n_block, unsigned int tf_flags,
54 unsigned int tag);
55extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
48extern void ata_dev_disable(struct ata_device *dev); 56extern void ata_dev_disable(struct ata_device *dev);
49extern void ata_port_flush_task(struct ata_port *ap); 57extern void ata_port_flush_task(struct ata_port *ap);
50extern unsigned ata_exec_internal(struct ata_device *dev, 58extern unsigned ata_exec_internal(struct ata_device *dev,
51 struct ata_taskfile *tf, const u8 *cdb, 59 struct ata_taskfile *tf, const u8 *cdb,
52 int dma_dir, void *buf, unsigned int buflen); 60 int dma_dir, void *buf, unsigned int buflen);
61extern unsigned ata_exec_internal_sg(struct ata_device *dev,
62 struct ata_taskfile *tf, const u8 *cdb,
63 int dma_dir, struct scatterlist *sg,
64 unsigned int n_elem);
53extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); 65extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
54extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 66extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
55 int post_reset, u16 *id); 67 unsigned int flags, u16 *id);
56extern int ata_dev_revalidate(struct ata_device *dev, int post_reset); 68extern int ata_dev_revalidate(struct ata_device *dev, unsigned int flags);
57extern int ata_dev_configure(struct ata_device *dev, int print_info); 69extern int ata_dev_configure(struct ata_device *dev);
58extern int sata_down_spd_limit(struct ata_port *ap); 70extern int sata_down_spd_limit(struct ata_port *ap);
59extern int sata_set_spd_needed(struct ata_port *ap); 71extern int sata_set_spd_needed(struct ata_port *ap);
60extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0); 72extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
61extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev); 73extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
74extern void ata_sg_clean(struct ata_queued_cmd *qc);
62extern void ata_qc_free(struct ata_queued_cmd *qc); 75extern void ata_qc_free(struct ata_queued_cmd *qc);
63extern void ata_qc_issue(struct ata_queued_cmd *qc); 76extern void ata_qc_issue(struct ata_queued_cmd *qc);
64extern void __ata_qc_complete(struct ata_queued_cmd *qc); 77extern void __ata_qc_complete(struct ata_queued_cmd *qc);
@@ -120,4 +133,7 @@ extern void ata_scsi_error(struct Scsi_Host *host);
120extern void ata_port_wait_eh(struct ata_port *ap); 133extern void ata_port_wait_eh(struct ata_port *ap);
121extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); 134extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
122 135
136/* libata-sff.c */
137extern u8 ata_irq_on(struct ata_port *ap);
138
123#endif /* __LIBATA_H__ */ 139#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 64eed99f6814..c5d61d1911a5 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -34,7 +34,7 @@
34#include <linux/dmi.h> 34#include <linux/dmi.h>
35 35
36#define DRV_NAME "pata_ali" 36#define DRV_NAME "pata_ali"
37#define DRV_VERSION "0.6.6" 37#define DRV_VERSION "0.7.2"
38 38
39/* 39/*
40 * Cable special cases 40 * Cable special cases
@@ -78,7 +78,7 @@ static int ali_c2_cable_detect(struct ata_port *ap)
78 implement the detect logic */ 78 implement the detect logic */
79 79
80 if (ali_cable_override(pdev)) 80 if (ali_cable_override(pdev))
81 return ATA_CBL_PATA80; 81 return ATA_CBL_PATA40_SHORT;
82 82
83 /* Host view cable detect 0x4A bit 0 primary bit 1 secondary 83 /* Host view cable detect 0x4A bit 0 primary bit 1 secondary
84 Bit set for 40 pin */ 84 Bit set for 40 pin */
@@ -337,9 +337,6 @@ static struct scsi_host_template ali_sht = {
337 .can_queue = ATA_DEF_QUEUE, 337 .can_queue = ATA_DEF_QUEUE,
338 .this_id = ATA_SHT_THIS_ID, 338 .this_id = ATA_SHT_THIS_ID,
339 .sg_tablesize = LIBATA_MAX_PRD, 339 .sg_tablesize = LIBATA_MAX_PRD,
340 /* Keep LBA28 counts so large I/O's don't turn LBA48 and PIO
341 with older controllers. Not locked so will grow on C5 or later */
342 .max_sectors = 255,
343 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 340 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
344 .emulated = ATA_SHT_EMULATED, 341 .emulated = ATA_SHT_EMULATED,
345 .use_clustering = ATA_SHT_USE_CLUSTERING, 342 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -348,6 +345,8 @@ static struct scsi_host_template ali_sht = {
348 .slave_configure = ata_scsi_slave_config, 345 .slave_configure = ata_scsi_slave_config,
349 .slave_destroy = ata_scsi_slave_destroy, 346 .slave_destroy = ata_scsi_slave_destroy,
350 .bios_param = ata_std_bios_param, 347 .bios_param = ata_std_bios_param,
348 .resume = ata_scsi_device_resume,
349 .suspend = ata_scsi_device_suspend,
351}; 350};
352 351
353/* 352/*
@@ -497,6 +496,69 @@ static struct ata_port_operations ali_c5_port_ops = {
497 .host_stop = ata_host_stop 496 .host_stop = ata_host_stop
498}; 497};
499 498
499
500/**
501 * ali_init_chipset - chip setup function
502 * @pdev: PCI device of ATA controller
503 *
504 * Perform the setup on the device that must be done both at boot
505 * and at resume time.
506 */
507
508static void ali_init_chipset(struct pci_dev *pdev)
509{
510 u8 rev, tmp;
511 struct pci_dev *north, *isa_bridge;
512
513 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
514
515 /*
516 * The chipset revision selects the driver operations and
517 * mode data.
518 */
519
520 if (rev >= 0x20 && rev < 0xC2) {
521 /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
522 pci_read_config_byte(pdev, 0x4B, &tmp);
523 /* Clear CD-ROM DMA write bit */
524 tmp &= 0x7F;
525 pci_write_config_byte(pdev, 0x4B, tmp);
526 } else if (rev >= 0xC2) {
527 /* Enable cable detection logic */
528 pci_read_config_byte(pdev, 0x4B, &tmp);
529 pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
530 }
531 north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
532 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
533
534 if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
535 /* Configure the ALi bridge logic. For non ALi rely on BIOS.
536 Set the south bridge enable bit */
537 pci_read_config_byte(isa_bridge, 0x79, &tmp);
538 if (rev == 0xC2)
539 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
540 else if (rev > 0xC2 && rev < 0xC5)
541 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
542 }
543 if (rev >= 0x20) {
544 /*
545 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
546 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
547 * via 0x54/55.
548 */
549 pci_read_config_byte(pdev, 0x53, &tmp);
550 if (rev <= 0x20)
551 tmp &= ~0x02;
552 if (rev >= 0xc7)
553 tmp |= 0x03;
554 else
555 tmp |= 0x01; /* CD_ROM enable for DMA */
556 pci_write_config_byte(pdev, 0x53, tmp);
557 }
558 pci_dev_put(isa_bridge);
559 pci_dev_put(north);
560 ata_pci_clear_simplex(pdev);
561}
500/** 562/**
501 * ali_init_one - discovery callback 563 * ali_init_one - discovery callback
502 * @pdev: PCI device ID 564 * @pdev: PCI device ID
@@ -570,7 +632,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
570 632
571 static struct ata_port_info *port_info[2]; 633 static struct ata_port_info *port_info[2];
572 u8 rev, tmp; 634 u8 rev, tmp;
573 struct pci_dev *north, *isa_bridge; 635 struct pci_dev *isa_bridge;
574 636
575 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 637 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
576 638
@@ -582,11 +644,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
582 if (rev < 0x20) { 644 if (rev < 0x20) {
583 port_info[0] = port_info[1] = &info_early; 645 port_info[0] = port_info[1] = &info_early;
584 } else if (rev < 0xC2) { 646 } else if (rev < 0xC2) {
585 /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
586 pci_read_config_byte(pdev, 0x4B, &tmp);
587 /* Clear CD-ROM DMA write bit */
588 tmp &= 0x7F;
589 pci_write_config_byte(pdev, 0x4B, tmp);
590 port_info[0] = port_info[1] = &info_20; 647 port_info[0] = port_info[1] = &info_20;
591 } else if (rev == 0xC2) { 648 } else if (rev == 0xC2) {
592 port_info[0] = port_info[1] = &info_c2; 649 port_info[0] = port_info[1] = &info_c2;
@@ -597,54 +654,25 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
597 } else 654 } else
598 port_info[0] = port_info[1] = &info_c5; 655 port_info[0] = port_info[1] = &info_c5;
599 656
600 if (rev >= 0xC2) { 657 ali_init_chipset(pdev);
601 /* Enable cable detection logic */ 658
602 pci_read_config_byte(pdev, 0x4B, &tmp);
603 pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
604 }
605
606 north = pci_get_slot(pdev->bus, PCI_DEVFN(0,0));
607 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 659 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
608 660 if (isa_bridge && rev >= 0x20 && rev < 0xC2) {
609 if (north && north->vendor == PCI_VENDOR_ID_AL) { 661 /* Are we paired with a UDMA capable chip */
610 /* Configure the ALi bridge logic. For non ALi rely on BIOS. 662 pci_read_config_byte(isa_bridge, 0x5E, &tmp);
611 Set the south bridge enable bit */ 663 if ((tmp & 0x1E) == 0x12)
612 pci_read_config_byte(isa_bridge, 0x79, &tmp); 664 port_info[0] = port_info[1] = &info_20_udma;
613 if (rev == 0xC2) 665 pci_dev_put(isa_bridge);
614 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
615 else if (rev > 0xC2)
616 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
617 }
618
619 if (rev >= 0x20) {
620 if (rev < 0xC2) {
621 /* Are we paired with a UDMA capable chip */
622 pci_read_config_byte(isa_bridge, 0x5E, &tmp);
623 if ((tmp & 0x1E) == 0x12)
624 port_info[0] = port_info[1] = &info_20_udma;
625 }
626 /*
627 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
628 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
629 * via 0x54/55.
630 */
631 pci_read_config_byte(pdev, 0x53, &tmp);
632 if (rev <= 0x20)
633 tmp &= ~0x02;
634 if (rev >= 0xc7)
635 tmp |= 0x03;
636 else
637 tmp |= 0x01; /* CD_ROM enable for DMA */
638 pci_write_config_byte(pdev, 0x53, tmp);
639 } 666 }
640
641 pci_dev_put(isa_bridge);
642 pci_dev_put(north);
643
644 ata_pci_clear_simplex(pdev);
645 return ata_pci_init_one(pdev, port_info, 2); 667 return ata_pci_init_one(pdev, port_info, 2);
646} 668}
647 669
670static int ali_reinit_one(struct pci_dev *pdev)
671{
672 ali_init_chipset(pdev);
673 return ata_pci_device_resume(pdev);
674}
675
648static const struct pci_device_id ali[] = { 676static const struct pci_device_id ali[] = {
649 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), }, 677 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
650 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), }, 678 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
@@ -656,7 +684,9 @@ static struct pci_driver ali_pci_driver = {
656 .name = DRV_NAME, 684 .name = DRV_NAME,
657 .id_table = ali, 685 .id_table = ali,
658 .probe = ali_init_one, 686 .probe = ali_init_one,
659 .remove = ata_pci_remove_one 687 .remove = ata_pci_remove_one,
688 .suspend = ata_pci_device_suspend,
689 .resume = ali_reinit_one,
660}; 690};
661 691
662static int __init ali_init(void) 692static int __init ali_init(void)
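
pata_ali is the first of many drivers in this merge to grow the same power-management plumbing: suspend/resume hooks in the scsi_host_template and the pci_driver, plus a small *_reinit_one() that redoes the chipset setup before handing control back to the generic resume path. A distilled skeleton of that pattern, with driver names invented; ata_pci_device_suspend() and ata_pci_device_resume() are the real helpers wired up in the hunks above and below.

#include <linux/pci.h>
#include <linux/libata.h>

/* Shared register setup, called from both probe and resume. */
static void mydrv_init_chipset(struct pci_dev *pdev)
{
	/* redo any BIOS-style configuration that is lost across suspend */
}

static int mydrv_reinit_one(struct pci_dev *pdev)
{
	mydrv_init_chipset(pdev);
	return ata_pci_device_resume(pdev);
}

static struct pci_driver mydrv_pci_driver = {
	.name		= "pata_mydrv",
	/* .id_table, .probe and .remove omitted in this sketch */
	.suspend	= ata_pci_device_suspend,
	.resume		= mydrv_reinit_one,
};
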
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8be46a63af74..a6b330089f22 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -25,7 +25,7 @@
25#include <linux/libata.h> 25#include <linux/libata.h>
26 26
27#define DRV_NAME "pata_amd" 27#define DRV_NAME "pata_amd"
28#define DRV_VERSION "0.2.4" 28#define DRV_VERSION "0.2.7"
29 29
30/** 30/**
31 * timing_setup - shared timing computation and load 31 * timing_setup - shared timing computation and load
@@ -326,7 +326,6 @@ static struct scsi_host_template amd_sht = {
326 .can_queue = ATA_DEF_QUEUE, 326 .can_queue = ATA_DEF_QUEUE,
327 .this_id = ATA_SHT_THIS_ID, 327 .this_id = ATA_SHT_THIS_ID,
328 .sg_tablesize = LIBATA_MAX_PRD, 328 .sg_tablesize = LIBATA_MAX_PRD,
329 .max_sectors = ATA_MAX_SECTORS,
330 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 329 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
331 .emulated = ATA_SHT_EMULATED, 330 .emulated = ATA_SHT_EMULATED,
332 .use_clustering = ATA_SHT_USE_CLUSTERING, 331 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -335,6 +334,8 @@ static struct scsi_host_template amd_sht = {
335 .slave_configure = ata_scsi_slave_config, 334 .slave_configure = ata_scsi_slave_config,
336 .slave_destroy = ata_scsi_slave_destroy, 335 .slave_destroy = ata_scsi_slave_destroy,
337 .bios_param = ata_std_bios_param, 336 .bios_param = ata_std_bios_param,
337 .resume = ata_scsi_device_resume,
338 .suspend = ata_scsi_device_suspend,
338}; 339};
339 340
340static struct ata_port_operations amd33_port_ops = { 341static struct ata_port_operations amd33_port_ops = {
@@ -662,6 +663,23 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
662 return ata_pci_init_one(pdev, port_info, 2); 663 return ata_pci_init_one(pdev, port_info, 2);
663} 664}
664 665
666static int amd_reinit_one(struct pci_dev *pdev)
667{
668 if (pdev->vendor == PCI_VENDOR_ID_AMD) {
669 u8 fifo;
670 pci_read_config_byte(pdev, 0x41, &fifo);
671 if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
672 /* FIFO is broken */
673 pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
674 else
675 pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
676 if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
677 pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
678 ata_pci_clear_simplex(pdev);
679 }
680 return ata_pci_device_resume(pdev);
681}
682
665static const struct pci_device_id amd[] = { 683static const struct pci_device_id amd[] = {
666 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 }, 684 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
667 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 }, 685 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
@@ -689,7 +707,9 @@ static struct pci_driver amd_pci_driver = {
689 .name = DRV_NAME, 707 .name = DRV_NAME,
690 .id_table = amd, 708 .id_table = amd,
691 .probe = amd_init_one, 709 .probe = amd_init_one,
692 .remove = ata_pci_remove_one 710 .remove = ata_pci_remove_one,
711 .suspend = ata_pci_device_suspend,
712 .resume = amd_reinit_one,
693}; 713};
694 714
695static int __init amd_init(void) 715static int __init amd_init(void)
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 2cd30761ca1f..37bc1323bda7 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -307,7 +307,6 @@ static struct scsi_host_template artop_sht = {
307 .can_queue = ATA_DEF_QUEUE, 307 .can_queue = ATA_DEF_QUEUE,
308 .this_id = ATA_SHT_THIS_ID, 308 .this_id = ATA_SHT_THIS_ID,
309 .sg_tablesize = LIBATA_MAX_PRD, 309 .sg_tablesize = LIBATA_MAX_PRD,
310 .max_sectors = ATA_MAX_SECTORS,
311 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 310 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
312 .emulated = ATA_SHT_EMULATED, 311 .emulated = ATA_SHT_EMULATED,
313 .use_clustering = ATA_SHT_USE_CLUSTERING, 312 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 4e1d3b59adbb..6f6672c55131 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -22,7 +22,7 @@
22#include <linux/libata.h> 22#include <linux/libata.h>
23 23
24#define DRV_NAME "pata_atiixp" 24#define DRV_NAME "pata_atiixp"
25#define DRV_VERSION "0.4.3" 25#define DRV_VERSION "0.4.4"
26 26
27enum { 27enum {
28 ATIIXP_IDE_PIO_TIMING = 0x40, 28 ATIIXP_IDE_PIO_TIMING = 0x40,
@@ -209,7 +209,6 @@ static struct scsi_host_template atiixp_sht = {
209 .can_queue = ATA_DEF_QUEUE, 209 .can_queue = ATA_DEF_QUEUE,
210 .this_id = ATA_SHT_THIS_ID, 210 .this_id = ATA_SHT_THIS_ID,
211 .sg_tablesize = LIBATA_MAX_PRD, 211 .sg_tablesize = LIBATA_MAX_PRD,
212 .max_sectors = ATA_MAX_SECTORS,
213 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 212 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
214 .emulated = ATA_SHT_EMULATED, 213 .emulated = ATA_SHT_EMULATED,
215 .use_clustering = ATA_SHT_USE_CLUSTERING, 214 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -218,6 +217,8 @@ static struct scsi_host_template atiixp_sht = {
218 .slave_configure = ata_scsi_slave_config, 217 .slave_configure = ata_scsi_slave_config,
219 .slave_destroy = ata_scsi_slave_destroy, 218 .slave_destroy = ata_scsi_slave_destroy,
220 .bios_param = ata_std_bios_param, 219 .bios_param = ata_std_bios_param,
220 .resume = ata_scsi_device_resume,
221 .suspend = ata_scsi_device_suspend,
221}; 222};
222 223
223static struct ata_port_operations atiixp_port_ops = { 224static struct ata_port_operations atiixp_port_ops = {
@@ -281,7 +282,9 @@ static struct pci_driver atiixp_pci_driver = {
281 .name = DRV_NAME, 282 .name = DRV_NAME,
282 .id_table = atiixp, 283 .id_table = atiixp,
283 .probe = atiixp_init_one, 284 .probe = atiixp_init_one,
284 .remove = ata_pci_remove_one 285 .remove = ata_pci_remove_one,
286 .resume = ata_pci_device_resume,
287 .suspend = ata_pci_device_suspend,
285}; 288};
286 289
287static int __init atiixp_init(void) 290static int __init atiixp_init(void)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 29a60df465da..15841a563694 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
31#include <linux/libata.h> 31#include <linux/libata.h>
32 32
33#define DRV_NAME "pata_cmd64x" 33#define DRV_NAME "pata_cmd64x"
34#define DRV_VERSION "0.2.1" 34#define DRV_VERSION "0.2.2"
35 35
36/* 36/*
37 * CMD64x specific registers definition. 37 * CMD64x specific registers definition.
@@ -268,7 +268,6 @@ static struct scsi_host_template cmd64x_sht = {
268 .can_queue = ATA_DEF_QUEUE, 268 .can_queue = ATA_DEF_QUEUE,
269 .this_id = ATA_SHT_THIS_ID, 269 .this_id = ATA_SHT_THIS_ID,
270 .sg_tablesize = LIBATA_MAX_PRD, 270 .sg_tablesize = LIBATA_MAX_PRD,
271 .max_sectors = ATA_MAX_SECTORS,
272 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 271 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
273 .emulated = ATA_SHT_EMULATED, 272 .emulated = ATA_SHT_EMULATED,
274 .use_clustering = ATA_SHT_USE_CLUSTERING, 273 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -277,6 +276,8 @@ static struct scsi_host_template cmd64x_sht = {
277 .slave_configure = ata_scsi_slave_config, 276 .slave_configure = ata_scsi_slave_config,
278 .slave_destroy = ata_scsi_slave_destroy, 277 .slave_destroy = ata_scsi_slave_destroy,
279 .bios_param = ata_std_bios_param, 278 .bios_param = ata_std_bios_param,
279 .resume = ata_scsi_device_resume,
280 .suspend = ata_scsi_device_suspend,
280}; 281};
281 282
282static struct ata_port_operations cmd64x_port_ops = { 283static struct ata_port_operations cmd64x_port_ops = {
@@ -469,6 +470,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
469 return ata_pci_init_one(pdev, port_info, 2); 470 return ata_pci_init_one(pdev, port_info, 2);
470} 471}
471 472
473static int cmd64x_reinit_one(struct pci_dev *pdev)
474{
475 u8 mrdmode;
476 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
477 pci_read_config_byte(pdev, MRDMODE, &mrdmode);
478 mrdmode &= ~ 0x30; /* IRQ set up */
479 mrdmode |= 0x02; /* Memory read line enable */
480 pci_write_config_byte(pdev, MRDMODE, mrdmode);
481#ifdef CONFIG_PPC
482 pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
483#endif
484 return ata_pci_device_resume(pdev);
485}
486
472static const struct pci_device_id cmd64x[] = { 487static const struct pci_device_id cmd64x[] = {
473 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 }, 488 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
474 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 }, 489 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
@@ -482,7 +497,9 @@ static struct pci_driver cmd64x_pci_driver = {
482 .name = DRV_NAME, 497 .name = DRV_NAME,
483 .id_table = cmd64x, 498 .id_table = cmd64x,
484 .probe = cmd64x_init_one, 499 .probe = cmd64x_init_one,
485 .remove = ata_pci_remove_one 500 .remove = ata_pci_remove_one,
501 .suspend = ata_pci_device_suspend,
502 .resume = cmd64x_reinit_one,
486}; 503};
487 504
488static int __init cmd64x_init(void) 505static int __init cmd64x_init(void)
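
cmd64x_reinit_one() and the various *_init_chipset() helpers in this series are mostly the same read-modify-write dance on PCI config space. A small sketch of that idiom follows; pci_rmw_byte() is an invented name, while pci_read_config_byte()/pci_write_config_byte() are the standard kernel accessors used throughout the hunks.

#include <linux/pci.h>

/* Invented helper: clear the bits in 'clear', then set the bits in 'set'. */
static void pci_rmw_byte(struct pci_dev *pdev, int where, u8 clear, u8 set)
{
	u8 val;

	pci_read_config_byte(pdev, where, &val);
	val &= ~clear;
	val |= set;
	pci_write_config_byte(pdev, where, val);
}

/* e.g. the MRDMODE fix-up above would read:
 *	pci_rmw_byte(pdev, MRDMODE, 0x30, 0x02);
 */
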
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 33d2b88f9c79..9f165a8e032d 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "pata_cs5520" 43#define DRV_NAME "pata_cs5520"
44#define DRV_VERSION "0.6.2" 44#define DRV_VERSION "0.6.3"
45 45
46struct pio_clocks 46struct pio_clocks
47{ 47{
@@ -159,7 +159,6 @@ static struct scsi_host_template cs5520_sht = {
159 .can_queue = ATA_DEF_QUEUE, 159 .can_queue = ATA_DEF_QUEUE,
160 .this_id = ATA_SHT_THIS_ID, 160 .this_id = ATA_SHT_THIS_ID,
161 .sg_tablesize = LIBATA_MAX_PRD, 161 .sg_tablesize = LIBATA_MAX_PRD,
162 .max_sectors = ATA_MAX_SECTORS,
163 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 162 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
164 .emulated = ATA_SHT_EMULATED, 163 .emulated = ATA_SHT_EMULATED,
165 .use_clustering = ATA_SHT_USE_CLUSTERING, 164 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -168,6 +167,8 @@ static struct scsi_host_template cs5520_sht = {
168 .slave_configure = ata_scsi_slave_config, 167 .slave_configure = ata_scsi_slave_config,
169 .slave_destroy = ata_scsi_slave_destroy, 168 .slave_destroy = ata_scsi_slave_destroy,
170 .bios_param = ata_std_bios_param, 169 .bios_param = ata_std_bios_param,
170 .resume = ata_scsi_device_resume,
171 .suspend = ata_scsi_device_suspend,
171}; 172};
172 173
173static struct ata_port_operations cs5520_port_ops = { 174static struct ata_port_operations cs5520_port_ops = {
@@ -297,6 +298,22 @@ static void __devexit cs5520_remove_one(struct pci_dev *pdev)
297 dev_set_drvdata(dev, NULL); 298 dev_set_drvdata(dev, NULL);
298} 299}
299 300
301/**
302 * cs5520_reinit_one - device resume
303 * @pdev: PCI device
304 *
305 * Do any reconfiguration work needed by a resume from RAM. We need
306 * to restore DMA mode support on BIOSen which disabled it
307 */
308
309static int cs5520_reinit_one(struct pci_dev *pdev)
310{
311 u8 pcicfg;
312 pci_read_config_byte(pdev, 0x60, &pcicfg);
313 if ((pcicfg & 0x40) == 0)
314 pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
315 return ata_pci_device_resume(pdev);
316}
300/* For now keep DMA off. We can set it for all but A rev CS5510 once the 317/* For now keep DMA off. We can set it for all but A rev CS5510 once the
301 core ATA code can handle it */ 318 core ATA code can handle it */
302 319
@@ -311,7 +328,9 @@ static struct pci_driver cs5520_pci_driver = {
311 .name = DRV_NAME, 328 .name = DRV_NAME,
312 .id_table = pata_cs5520, 329 .id_table = pata_cs5520,
313 .probe = cs5520_init_one, 330 .probe = cs5520_init_one,
314 .remove = cs5520_remove_one 331 .remove = cs5520_remove_one,
332 .suspend = ata_pci_device_suspend,
333 .resume = cs5520_reinit_one,
315}; 334};
316 335
317static int __init cs5520_init(void) 336static int __init cs5520_init(void)
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 981f49223469..1c628014dae6 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -35,7 +35,7 @@
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36 36
37#define DRV_NAME "pata_cs5530" 37#define DRV_NAME "pata_cs5530"
38#define DRV_VERSION "0.6" 38#define DRV_VERSION "0.7.1"
39 39
40/** 40/**
41 * cs5530_set_piomode - PIO setup 41 * cs5530_set_piomode - PIO setup
@@ -173,7 +173,6 @@ static struct scsi_host_template cs5530_sht = {
173 .can_queue = ATA_DEF_QUEUE, 173 .can_queue = ATA_DEF_QUEUE,
174 .this_id = ATA_SHT_THIS_ID, 174 .this_id = ATA_SHT_THIS_ID,
175 .sg_tablesize = LIBATA_MAX_PRD, 175 .sg_tablesize = LIBATA_MAX_PRD,
176 .max_sectors = ATA_MAX_SECTORS,
177 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 176 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
178 .emulated = ATA_SHT_EMULATED, 177 .emulated = ATA_SHT_EMULATED,
179 .use_clustering = ATA_SHT_USE_CLUSTERING, 178 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -182,6 +181,8 @@ static struct scsi_host_template cs5530_sht = {
182 .slave_configure = ata_scsi_slave_config, 181 .slave_configure = ata_scsi_slave_config,
183 .slave_destroy = ata_scsi_slave_destroy, 182 .slave_destroy = ata_scsi_slave_destroy,
184 .bios_param = ata_std_bios_param, 183 .bios_param = ata_std_bios_param,
184 .resume = ata_scsi_device_resume,
185 .suspend = ata_scsi_device_suspend,
185}; 186};
186 187
187static struct ata_port_operations cs5530_port_ops = { 188static struct ata_port_operations cs5530_port_ops = {
@@ -239,38 +240,18 @@ static int cs5530_is_palmax(void)
239 return 0; 240 return 0;
240} 241}
241 242
243
242/** 244/**
243 * cs5530_init_one - Initialise a CS5530 245 * cs5530_init_chip - Chipset init
244 * @dev: PCI device
245 * @id: Entry in match table
246 * 246 *
247 * Install a driver for the newly found CS5530 companion chip. Most of 247 * Perform the chip initialisation work that is shared between both
248 * this is just housekeeping. We have to set the chip up correctly and 248 * setup and resume paths
249 * turn off various bits of emulation magic.
250 */ 249 */
251 250
252static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id) 251static int cs5530_init_chip(void)
253{ 252{
254 int compiler_warning_pointless_fix; 253 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;
255 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
256 static struct ata_port_info info = {
257 .sht = &cs5530_sht,
258 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
259 .pio_mask = 0x1f,
260 .mwdma_mask = 0x07,
261 .udma_mask = 0x07,
262 .port_ops = &cs5530_port_ops
263 };
264 /* The docking connector doesn't do UDMA, and it seems not MWDMA */
265 static struct ata_port_info info_palmax_secondary = {
266 .sht = &cs5530_sht,
267 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
268 .pio_mask = 0x1f,
269 .port_ops = &cs5530_port_ops
270 };
271 static struct ata_port_info *port_info[2] = { &info, &info };
272 254
273 dev = NULL;
274 while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) { 255 while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
275 switch (dev->device) { 256 switch (dev->device) {
276 case PCI_DEVICE_ID_CYRIX_PCI_MASTER: 257 case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
@@ -291,7 +272,7 @@ static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
291 } 272 }
292 273
293 pci_set_master(cs5530_0); 274 pci_set_master(cs5530_0);
294 compiler_warning_pointless_fix = pci_set_mwi(cs5530_0); 275 pci_set_mwi(cs5530_0);
295 276
296 /* 277 /*
297 * Set PCI CacheLineSize to 16-bytes: 278 * Set PCI CacheLineSize to 16-bytes:
@@ -339,13 +320,7 @@ static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
339 320
340 pci_dev_put(master_0); 321 pci_dev_put(master_0);
341 pci_dev_put(cs5530_0); 322 pci_dev_put(cs5530_0);
342 323 return 0;
343 if (cs5530_is_palmax())
344 port_info[1] = &info_palmax_secondary;
345
346 /* Now kick off ATA set up */
347 return ata_pci_init_one(dev, port_info, 2);
348
349fail_put: 324fail_put:
350 if (master_0) 325 if (master_0)
351 pci_dev_put(master_0); 326 pci_dev_put(master_0);
@@ -354,6 +329,53 @@ fail_put:
354 return -ENODEV; 329 return -ENODEV;
355} 330}
356 331
332/**
333 * cs5530_init_one - Initialise a CS5530
334 * @dev: PCI device
335 * @id: Entry in match table
336 *
337 * Install a driver for the newly found CS5530 companion chip. Most of
338 * this is just housekeeping. We have to set the chip up correctly and
339 * turn off various bits of emulation magic.
340 */
341
342static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
343{
344 static struct ata_port_info info = {
345 .sht = &cs5530_sht,
346 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
347 .pio_mask = 0x1f,
348 .mwdma_mask = 0x07,
349 .udma_mask = 0x07,
350 .port_ops = &cs5530_port_ops
351 };
352 /* The docking connector doesn't do UDMA, and it seems not MWDMA */
353 static struct ata_port_info info_palmax_secondary = {
354 .sht = &cs5530_sht,
355 .flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
356 .pio_mask = 0x1f,
357 .port_ops = &cs5530_port_ops
358 };
359 static struct ata_port_info *port_info[2] = { &info, &info };
360
361 /* Chip initialisation */
362 if (cs5530_init_chip())
363 return -ENODEV;
364
365 if (cs5530_is_palmax())
366 port_info[1] = &info_palmax_secondary;
367
368 /* Now kick off ATA set up */
369 return ata_pci_init_one(pdev, port_info, 2);
370}
371
372static int cs5530_reinit_one(struct pci_dev *pdev)
373{
374 /* If we fail on resume we are doomed */
375 BUG_ON(cs5530_init_chip());
376 return ata_pci_device_resume(pdev);
377}
378
357static const struct pci_device_id cs5530[] = { 379static const struct pci_device_id cs5530[] = {
358 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), }, 380 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
359 381
@@ -364,7 +386,9 @@ static struct pci_driver cs5530_pci_driver = {
364 .name = DRV_NAME, 386 .name = DRV_NAME,
365 .id_table = cs5530, 387 .id_table = cs5530,
366 .probe = cs5530_init_one, 388 .probe = cs5530_init_one,
367 .remove = ata_pci_remove_one 389 .remove = ata_pci_remove_one,
390 .suspend = ata_pci_device_suspend,
391 .resume = cs5530_reinit_one,
368}; 392};
369 393
370static int __init cs5530_init(void) 394static int __init cs5530_init(void)
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 8dafa4a49fdc..e3efec4ffc79 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -39,7 +39,7 @@
39#include <asm/msr.h> 39#include <asm/msr.h>
40 40
41#define DRV_NAME "cs5535" 41#define DRV_NAME "cs5535"
42#define DRV_VERSION "0.2.10" 42#define DRV_VERSION "0.2.11"
43 43
44/* 44/*
45 * The Geode (Aka Athlon GX now) uses an internal MSR based 45 * The Geode (Aka Athlon GX now) uses an internal MSR based
@@ -177,7 +177,6 @@ static struct scsi_host_template cs5535_sht = {
177 .can_queue = ATA_DEF_QUEUE, 177 .can_queue = ATA_DEF_QUEUE,
178 .this_id = ATA_SHT_THIS_ID, 178 .this_id = ATA_SHT_THIS_ID,
179 .sg_tablesize = LIBATA_MAX_PRD, 179 .sg_tablesize = LIBATA_MAX_PRD,
180 .max_sectors = ATA_MAX_SECTORS,
181 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 180 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
182 .emulated = ATA_SHT_EMULATED, 181 .emulated = ATA_SHT_EMULATED,
183 .use_clustering = ATA_SHT_USE_CLUSTERING, 182 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -186,6 +185,8 @@ static struct scsi_host_template cs5535_sht = {
186 .slave_configure = ata_scsi_slave_config, 185 .slave_configure = ata_scsi_slave_config,
187 .slave_destroy = ata_scsi_slave_destroy, 186 .slave_destroy = ata_scsi_slave_destroy,
188 .bios_param = ata_std_bios_param, 187 .bios_param = ata_std_bios_param,
188 .resume = ata_scsi_device_resume,
189 .suspend = ata_scsi_device_suspend,
189}; 190};
190 191
191static struct ata_port_operations cs5535_port_ops = { 192static struct ata_port_operations cs5535_port_ops = {
@@ -268,7 +269,9 @@ static struct pci_driver cs5535_pci_driver = {
268 .name = DRV_NAME, 269 .name = DRV_NAME,
269 .id_table = cs5535, 270 .id_table = cs5535,
270 .probe = cs5535_init_one, 271 .probe = cs5535_init_one,
271 .remove = ata_pci_remove_one 272 .remove = ata_pci_remove_one,
273 .suspend = ata_pci_device_suspend,
274 .resume = ata_pci_device_resume,
272}; 275};
273 276
274static int __init cs5535_init(void) 277static int __init cs5535_init(void)
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 5a0b811907ee..e2a95699bae7 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -18,7 +18,7 @@
18#include <linux/libata.h> 18#include <linux/libata.h>
19 19
20#define DRV_NAME "pata_cypress" 20#define DRV_NAME "pata_cypress"
21#define DRV_VERSION "0.1.2" 21#define DRV_VERSION "0.1.4"
22 22
23/* here are the offset definitions for the registers */ 23/* here are the offset definitions for the registers */
24 24
@@ -128,7 +128,6 @@ static struct scsi_host_template cy82c693_sht = {
128 .can_queue = ATA_DEF_QUEUE, 128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID, 129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD, 130 .sg_tablesize = LIBATA_MAX_PRD,
131 .max_sectors = ATA_MAX_SECTORS,
132 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 131 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
133 .emulated = ATA_SHT_EMULATED, 132 .emulated = ATA_SHT_EMULATED,
134 .use_clustering = ATA_SHT_USE_CLUSTERING, 133 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -137,6 +136,8 @@ static struct scsi_host_template cy82c693_sht = {
137 .slave_configure = ata_scsi_slave_config, 136 .slave_configure = ata_scsi_slave_config,
138 .slave_destroy = ata_scsi_slave_destroy, 137 .slave_destroy = ata_scsi_slave_destroy,
139 .bios_param = ata_std_bios_param, 138 .bios_param = ata_std_bios_param,
139 .resume = ata_scsi_device_resume,
140 .suspend = ata_scsi_device_suspend,
140}; 141};
141 142
142static struct ata_port_operations cy82c693_port_ops = { 143static struct ata_port_operations cy82c693_port_ops = {
@@ -204,7 +205,9 @@ static struct pci_driver cy82c693_pci_driver = {
204 .name = DRV_NAME, 205 .name = DRV_NAME,
205 .id_table = cy82c693, 206 .id_table = cy82c693,
206 .probe = cy82c693_init_one, 207 .probe = cy82c693_init_one,
207 .remove = ata_pci_remove_one 208 .remove = ata_pci_remove_one,
209 .suspend = ata_pci_device_suspend,
210 .resume = ata_pci_device_resume,
208}; 211};
209 212
210static int __init cy82c693_init(void) 213static int __init cy82c693_init(void)
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 755f79279de3..edf8a63f50af 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -22,7 +22,7 @@
22#include <linux/ata.h> 22#include <linux/ata.h>
23 23
24#define DRV_NAME "pata_efar" 24#define DRV_NAME "pata_efar"
25#define DRV_VERSION "0.4.2" 25#define DRV_VERSION "0.4.3"
26 26
27/** 27/**
28 * efar_pre_reset - check for 40/80 pin 28 * efar_pre_reset - check for 40/80 pin
@@ -226,7 +226,6 @@ static struct scsi_host_template efar_sht = {
226 .can_queue = ATA_DEF_QUEUE, 226 .can_queue = ATA_DEF_QUEUE,
227 .this_id = ATA_SHT_THIS_ID, 227 .this_id = ATA_SHT_THIS_ID,
228 .sg_tablesize = LIBATA_MAX_PRD, 228 .sg_tablesize = LIBATA_MAX_PRD,
229 .max_sectors = ATA_MAX_SECTORS,
230 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 229 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
231 .emulated = ATA_SHT_EMULATED, 230 .emulated = ATA_SHT_EMULATED,
232 .use_clustering = ATA_SHT_USE_CLUSTERING, 231 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -235,6 +234,8 @@ static struct scsi_host_template efar_sht = {
235 .slave_configure = ata_scsi_slave_config, 234 .slave_configure = ata_scsi_slave_config,
236 .slave_destroy = ata_scsi_slave_destroy, 235 .slave_destroy = ata_scsi_slave_destroy,
237 .bios_param = ata_std_bios_param, 236 .bios_param = ata_std_bios_param,
237 .resume = ata_scsi_device_resume,
238 .suspend = ata_scsi_device_suspend,
238}; 239};
239 240
240static const struct ata_port_operations efar_ops = { 241static const struct ata_port_operations efar_ops = {
@@ -316,6 +317,8 @@ static struct pci_driver efar_pci_driver = {
316 .id_table = efar_pci_tbl, 317 .id_table = efar_pci_tbl,
317 .probe = efar_init_one, 318 .probe = efar_init_one,
318 .remove = ata_pci_remove_one, 319 .remove = ata_pci_remove_one,
320 .suspend = ata_pci_device_suspend,
321 .resume = ata_pci_device_resume,
319}; 322};
320 323
321static int __init efar_init(void) 324static int __init efar_init(void)
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index c0e150a9586b..2663599a7c02 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -27,7 +27,7 @@
27#include <linux/libata.h> 27#include <linux/libata.h>
28 28
29#define DRV_NAME "pata_hpt366" 29#define DRV_NAME "pata_hpt366"
30#define DRV_VERSION "0.5" 30#define DRV_VERSION "0.5.3"
31 31
32struct hpt_clock { 32struct hpt_clock {
33 u8 xfer_speed; 33 u8 xfer_speed;
@@ -222,9 +222,17 @@ static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
222 222
223static int hpt36x_pre_reset(struct ata_port *ap) 223static int hpt36x_pre_reset(struct ata_port *ap)
224{ 224{
225 static const struct pci_bits hpt36x_enable_bits[] = {
226 { 0x50, 1, 0x04, 0x04 },
227 { 0x54, 1, 0x04, 0x04 }
228 };
229
225 u8 ata66; 230 u8 ata66;
226 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 231 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
227 232
233 if (!pci_test_config_bits(pdev, &hpt36x_enable_bits[ap->port_no]))
234 return -ENOENT;
235
228 pci_read_config_byte(pdev, 0x5A, &ata66); 236 pci_read_config_byte(pdev, 0x5A, &ata66);
229 if (ata66 & (1 << ap->port_no)) 237 if (ata66 & (1 << ap->port_no))
230 ap->cbl = ATA_CBL_PATA40; 238 ap->cbl = ATA_CBL_PATA40;
@@ -322,7 +330,6 @@ static struct scsi_host_template hpt36x_sht = {
322 .can_queue = ATA_DEF_QUEUE, 330 .can_queue = ATA_DEF_QUEUE,
323 .this_id = ATA_SHT_THIS_ID, 331 .this_id = ATA_SHT_THIS_ID,
324 .sg_tablesize = LIBATA_MAX_PRD, 332 .sg_tablesize = LIBATA_MAX_PRD,
325 .max_sectors = ATA_MAX_SECTORS,
326 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 333 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
327 .emulated = ATA_SHT_EMULATED, 334 .emulated = ATA_SHT_EMULATED,
328 .use_clustering = ATA_SHT_USE_CLUSTERING, 335 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -331,6 +338,8 @@ static struct scsi_host_template hpt36x_sht = {
331 .slave_configure = ata_scsi_slave_config, 338 .slave_configure = ata_scsi_slave_config,
332 .slave_destroy = ata_scsi_slave_destroy, 339 .slave_destroy = ata_scsi_slave_destroy,
333 .bios_param = ata_std_bios_param, 340 .bios_param = ata_std_bios_param,
341 .resume = ata_scsi_device_resume,
342 .suspend = ata_scsi_device_suspend,
334}; 343};
335 344
336/* 345/*
@@ -373,6 +382,27 @@ static struct ata_port_operations hpt366_port_ops = {
373}; 382};
374 383
375/** 384/**
385 * hpt36x_init_chipset - common chip setup
386 * @dev: PCI device
387 *
388 * Perform the chip setup work that must be done at both init and
389 * resume time
390 */
391
392static void hpt36x_init_chipset(struct pci_dev *dev)
393{
394 u8 drive_fast;
395 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
396 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
397 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
398 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
399
400 pci_read_config_byte(dev, 0x51, &drive_fast);
401 if (drive_fast & 0x80)
402 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
403}
404
405/**
376 * hpt36x_init_one - Initialise an HPT366/368 406 * hpt36x_init_one - Initialise an HPT366/368
377 * @dev: PCI device 407 * @dev: PCI device
378 * @id: Entry in match table 408 * @id: Entry in match table
@@ -407,7 +437,6 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
407 437
408 u32 class_rev; 438 u32 class_rev;
409 u32 reg1; 439 u32 reg1;
410 u8 drive_fast;
411 440
412 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); 441 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
413 class_rev &= 0xFF; 442 class_rev &= 0xFF;
@@ -417,14 +446,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
417 if (class_rev > 2) 446 if (class_rev > 2)
418 return -ENODEV; 447 return -ENODEV;
419 448
420 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); 449 hpt36x_init_chipset(dev);
421 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
422 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
423 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
424
425 pci_read_config_byte(dev, 0x51, &drive_fast);
426 if (drive_fast & 0x80)
427 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
428 450
429 pci_read_config_dword(dev, 0x40, &reg1); 451 pci_read_config_dword(dev, 0x40, &reg1);
430 452
@@ -445,9 +467,15 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
445 return ata_pci_init_one(dev, port_info, 2); 467 return ata_pci_init_one(dev, port_info, 2);
446} 468}
447 469
470static int hpt36x_reinit_one(struct pci_dev *dev)
471{
472 hpt36x_init_chipset(dev);
473 return ata_pci_device_resume(dev);
474}
475
476
448static const struct pci_device_id hpt36x[] = { 477static const struct pci_device_id hpt36x[] = {
449 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), }, 478 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
450
451 { }, 479 { },
452}; 480};
453 481
@@ -455,7 +483,9 @@ static struct pci_driver hpt36x_pci_driver = {
455 .name = DRV_NAME, 483 .name = DRV_NAME,
456 .id_table = hpt36x, 484 .id_table = hpt36x,
457 .probe = hpt36x_init_one, 485 .probe = hpt36x_init_one,
458 .remove = ata_pci_remove_one 486 .remove = ata_pci_remove_one,
487 .suspend = ata_pci_device_suspend,
488 .resume = hpt36x_reinit_one,
459}; 489};
460 490
461static int __init hpt36x_init(void) 491static int __init hpt36x_init(void)
@@ -463,13 +493,11 @@ static int __init hpt36x_init(void)
463 return pci_register_driver(&hpt36x_pci_driver); 493 return pci_register_driver(&hpt36x_pci_driver);
464} 494}
465 495
466
467static void __exit hpt36x_exit(void) 496static void __exit hpt36x_exit(void)
468{ 497{
469 pci_unregister_driver(&hpt36x_pci_driver); 498 pci_unregister_driver(&hpt36x_pci_driver);
470} 499}
471 500
472
473MODULE_AUTHOR("Alan Cox"); 501MODULE_AUTHOR("Alan Cox");
474MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368"); 502MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
475MODULE_LICENSE("GPL"); 503MODULE_LICENSE("GPL");
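
The new hpt36x_pre_reset() guard checks a per-channel enable bit in PCI config space (0x50 bit 2 for the primary channel, 0x54 bit 2 for the secondary, per the table in the hunk) and returns -ENOENT for a disabled channel so libata skips it. Stripped of the libata plumbing, the test amounts to the sketch below; the struct mirrors the { reg, width, mask, val } initialisers above, and read_cfg_byte() is a stand-in for pci_read_config_byte().

#include <stdint.h>

struct cfg_bits {
	unsigned int reg;	/* config-space offset, e.g. 0x50 or 0x54 */
	unsigned int width;	/* access width in bytes; only 1 handled here */
	unsigned long mask;
	unsigned long val;
};

/* Stand-in for a real config-space read. */
static uint8_t read_cfg_byte(unsigned int reg)
{
	(void)reg;
	return 0x04;		/* pretend the enable bit is set */
}

/* Non-zero if the channel described by 'bits' is enabled. */
static int channel_enabled(const struct cfg_bits *bits)
{
	return (read_cfg_byte(bits->reg) & bits->mask) == bits->val;
}
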
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 1eeb16f0fb02..47082df7199e 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -768,7 +768,6 @@ static struct scsi_host_template hpt37x_sht = {
768 .can_queue = ATA_DEF_QUEUE, 768 .can_queue = ATA_DEF_QUEUE,
769 .this_id = ATA_SHT_THIS_ID, 769 .this_id = ATA_SHT_THIS_ID,
770 .sg_tablesize = LIBATA_MAX_PRD, 770 .sg_tablesize = LIBATA_MAX_PRD,
771 .max_sectors = ATA_MAX_SECTORS,
772 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 771 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
773 .emulated = ATA_SHT_EMULATED, 772 .emulated = ATA_SHT_EMULATED,
774 .use_clustering = ATA_SHT_USE_CLUSTERING, 773 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 47d7664e9eee..f6817b4093a4 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -334,7 +334,6 @@ static struct scsi_host_template hpt3x2n_sht = {
334 .can_queue = ATA_DEF_QUEUE, 334 .can_queue = ATA_DEF_QUEUE,
335 .this_id = ATA_SHT_THIS_ID, 335 .this_id = ATA_SHT_THIS_ID,
336 .sg_tablesize = LIBATA_MAX_PRD, 336 .sg_tablesize = LIBATA_MAX_PRD,
337 .max_sectors = ATA_MAX_SECTORS,
338 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 337 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
339 .emulated = ATA_SHT_EMULATED, 338 .emulated = ATA_SHT_EMULATED,
340 .use_clustering = ATA_SHT_USE_CLUSTERING, 339 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index d216cc564b56..5f1d385eb592 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -23,7 +23,7 @@
23#include <linux/libata.h> 23#include <linux/libata.h>
24 24
25#define DRV_NAME "pata_hpt3x3" 25#define DRV_NAME "pata_hpt3x3"
26#define DRV_VERSION "0.4.1" 26#define DRV_VERSION "0.4.2"
27 27
28static int hpt3x3_probe_init(struct ata_port *ap) 28static int hpt3x3_probe_init(struct ata_port *ap)
29{ 29{
@@ -111,7 +111,6 @@ static struct scsi_host_template hpt3x3_sht = {
111 .can_queue = ATA_DEF_QUEUE, 111 .can_queue = ATA_DEF_QUEUE,
112 .this_id = ATA_SHT_THIS_ID, 112 .this_id = ATA_SHT_THIS_ID,
113 .sg_tablesize = LIBATA_MAX_PRD, 113 .sg_tablesize = LIBATA_MAX_PRD,
114 .max_sectors = ATA_MAX_SECTORS,
115 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 114 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
116 .emulated = ATA_SHT_EMULATED, 115 .emulated = ATA_SHT_EMULATED,
117 .use_clustering = ATA_SHT_USE_CLUSTERING, 116 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -120,6 +119,8 @@ static struct scsi_host_template hpt3x3_sht = {
120 .slave_configure = ata_scsi_slave_config, 119 .slave_configure = ata_scsi_slave_config,
121 .slave_destroy = ata_scsi_slave_destroy, 120 .slave_destroy = ata_scsi_slave_destroy,
122 .bios_param = ata_std_bios_param, 121 .bios_param = ata_std_bios_param,
122 .resume = ata_scsi_device_resume,
123 .suspend = ata_scsi_device_suspend,
123}; 124};
124 125
125static struct ata_port_operations hpt3x3_port_ops = { 126static struct ata_port_operations hpt3x3_port_ops = {
@@ -158,6 +159,27 @@ static struct ata_port_operations hpt3x3_port_ops = {
158}; 159};
159 160
160/** 161/**
162 * hpt3x3_init_chipset - chip setup
163 * @dev: PCI device
164 *
165 * Perform the setup required at boot and on resume.
166 */
167
168static void hpt3x3_init_chipset(struct pci_dev *dev)
169{
170 u16 cmd;
171 /* Initialize the board */
172 pci_write_config_word(dev, 0x80, 0x00);
173 /* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
174 pci_read_config_word(dev, PCI_COMMAND, &cmd);
175 if (cmd & PCI_COMMAND_MEMORY)
176 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
177 else
178 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
179}
180
181
182/**
161 * hpt3x3_init_one - Initialise an HPT343/363 183 * hpt3x3_init_one - Initialise an HPT343/363
162 * @dev: PCI device 184 * @dev: PCI device
163 * @id: Entry in match table 185 * @id: Entry in match table
@@ -178,21 +200,18 @@ static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
178 .port_ops = &hpt3x3_port_ops 200 .port_ops = &hpt3x3_port_ops
179 }; 201 };
180 static struct ata_port_info *port_info[2] = { &info, &info }; 202 static struct ata_port_info *port_info[2] = { &info, &info };
181 u16 cmd;
182
183 /* Initialize the board */
184 pci_write_config_word(dev, 0x80, 0x00);
185 /* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
186 pci_read_config_word(dev, PCI_COMMAND, &cmd);
187 if (cmd & PCI_COMMAND_MEMORY)
188 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
189 else
190 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
191 203
204 hpt3x3_init_chipset(dev);
192 /* Now kick off ATA set up */ 205 /* Now kick off ATA set up */
193 return ata_pci_init_one(dev, port_info, 2); 206 return ata_pci_init_one(dev, port_info, 2);
194} 207}
195 208
209static int hpt3x3_reinit_one(struct pci_dev *dev)
210{
211 hpt3x3_init_chipset(dev);
212 return ata_pci_device_resume(dev);
213}
214
196static const struct pci_device_id hpt3x3[] = { 215static const struct pci_device_id hpt3x3[] = {
197 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), }, 216 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
198 217
@@ -203,7 +222,9 @@ static struct pci_driver hpt3x3_pci_driver = {
203 .name = DRV_NAME, 222 .name = DRV_NAME,
204 .id_table = hpt3x3, 223 .id_table = hpt3x3,
205 .probe = hpt3x3_init_one, 224 .probe = hpt3x3_init_one,
206 .remove = ata_pci_remove_one 225 .remove = ata_pci_remove_one,
226 .suspend = ata_pci_device_suspend,
227 .resume = hpt3x3_reinit_one,
207}; 228};
208 229
209static int __init hpt3x3_init(void) 230static int __init hpt3x3_init(void)
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 40ca2b82b7fc..a97d55ae95c9 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -27,7 +27,6 @@ static struct scsi_host_template isapnp_sht = {
27 .can_queue = ATA_DEF_QUEUE, 27 .can_queue = ATA_DEF_QUEUE,
28 .this_id = ATA_SHT_THIS_ID, 28 .this_id = ATA_SHT_THIS_ID,
29 .sg_tablesize = LIBATA_MAX_PRD, 29 .sg_tablesize = LIBATA_MAX_PRD,
30 .max_sectors = ATA_MAX_SECTORS,
31 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 30 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
32 .emulated = ATA_SHT_EMULATED, 31 .emulated = ATA_SHT_EMULATED,
33 .use_clustering = ATA_SHT_USE_CLUSTERING, 32 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 7f68f14be6fd..0b56ff3d1cfe 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -80,7 +80,7 @@
80 80
81 81
82#define DRV_NAME "pata_it821x" 82#define DRV_NAME "pata_it821x"
83#define DRV_VERSION "0.3.2" 83#define DRV_VERSION "0.3.3"
84 84
85struct it821x_dev 85struct it821x_dev
86{ 86{
@@ -666,9 +666,6 @@ static struct scsi_host_template it821x_sht = {
666 .can_queue = ATA_DEF_QUEUE, 666 .can_queue = ATA_DEF_QUEUE,
667 .this_id = ATA_SHT_THIS_ID, 667 .this_id = ATA_SHT_THIS_ID,
668 .sg_tablesize = LIBATA_MAX_PRD, 668 .sg_tablesize = LIBATA_MAX_PRD,
669 /* 255 sectors to begin with. This is locked in smart mode but not
670 in pass through */
671 .max_sectors = 255,
672 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 669 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
673 .emulated = ATA_SHT_EMULATED, 670 .emulated = ATA_SHT_EMULATED,
674 .use_clustering = ATA_SHT_USE_CLUSTERING, 671 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -677,6 +674,8 @@ static struct scsi_host_template it821x_sht = {
677 .slave_configure = ata_scsi_slave_config, 674 .slave_configure = ata_scsi_slave_config,
678 .slave_destroy = ata_scsi_slave_destroy, 675 .slave_destroy = ata_scsi_slave_destroy,
679 .bios_param = ata_std_bios_param, 676 .bios_param = ata_std_bios_param,
677 .resume = ata_scsi_device_resume,
678 .suspend = ata_scsi_device_suspend,
680}; 679};
681 680
682static struct ata_port_operations it821x_smart_port_ops = { 681static struct ata_port_operations it821x_smart_port_ops = {
@@ -809,6 +808,14 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
809 return ata_pci_init_one(pdev, port_info, 2); 808 return ata_pci_init_one(pdev, port_info, 2);
810} 809}
811 810
811static int it821x_reinit_one(struct pci_dev *pdev)
812{
813 /* Resume - turn raid back off if need be */
814 if (it8212_noraid)
815 it821x_disable_raid(pdev);
816 return ata_pci_device_resume(pdev);
817}
818
812static const struct pci_device_id it821x[] = { 819static const struct pci_device_id it821x[] = {
813 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), }, 820 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
814 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), }, 821 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
@@ -820,7 +827,9 @@ static struct pci_driver it821x_pci_driver = {
820 .name = DRV_NAME, 827 .name = DRV_NAME,
821 .id_table = it821x, 828 .id_table = it821x,
822 .probe = it821x_init_one, 829 .probe = it821x_init_one,
823 .remove = ata_pci_remove_one 830 .remove = ata_pci_remove_one,
831 .suspend = ata_pci_device_suspend,
832 .resume = it821x_reinit_one,
824}; 833};
825 834
826static int __init it821x_init(void) 835static int __init it821x_init(void)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
new file mode 100644
index 000000000000..cb8924109f59
--- /dev/null
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -0,0 +1,271 @@
1/*
2 * ixp4xx PATA/Compact Flash driver
3 * Copyright (c) 2006 Tower Technologies
4 * Author: Alessandro Zummo <a.zummo@towertech.it>
5 *
6 * An ATA driver to handle a Compact Flash connected
7 * to the ixp4xx expansion bus in TrueIDE mode. The CF
 8 * must have its chip selects connected to two CS lines
 9 * on the ixp4xx. The interrupt line is optional; if not
10 * specified, the driver will run in polling mode.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/libata.h>
21#include <linux/irq.h>
22#include <linux/platform_device.h>
23#include <scsi/scsi_host.h>
24
25#define DRV_NAME "pata_ixp4xx_cf"
26#define DRV_VERSION "0.1.1"
27
28static void ixp4xx_set_mode(struct ata_port *ap)
29{
30 int i;
31
32 for (i = 0; i < ATA_MAX_DEVICES; i++) {
33 struct ata_device *dev = &ap->device[i];
34 if (ata_dev_enabled(dev)) {
35 dev->pio_mode = XFER_PIO_0;
36 dev->xfer_mode = XFER_PIO_0;
37 dev->xfer_shift = ATA_SHIFT_PIO;
38 dev->flags |= ATA_DFLAG_PIO;
39 }
40 }
41}
42
43static void ixp4xx_phy_reset(struct ata_port *ap)
44{
45 ap->cbl = ATA_CBL_PATA40;
46 ata_port_probe(ap);
47 ata_bus_reset(ap);
48}
49
50static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
51 unsigned int buflen, int write_data)
52{
53 unsigned int i;
54 unsigned int words = buflen >> 1;
55 u16 *buf16 = (u16 *) buf;
56 struct ata_port *ap = adev->ap;
57 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
58 struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
59
60 /* set the expansion bus in 16bit mode and restore
61 * 8 bit mode after the transaction.
62 */
63 *data->cs0_cfg &= ~(0x01);
64 udelay(100);
65
66 /* Transfer multiple of 2 bytes */
67 if (write_data) {
68 for (i = 0; i < words; i++)
69 writew(buf16[i], mmio);
70 } else {
71 for (i = 0; i < words; i++)
72 buf16[i] = readw(mmio);
73 }
74
75 /* Transfer trailing 1 byte, if any. */
76 if (unlikely(buflen & 0x01)) {
77 u16 align_buf[1] = { 0 };
78 unsigned char *trailing_buf = buf + buflen - 1;
79
80 if (write_data) {
81 memcpy(align_buf, trailing_buf, 1);
82 writew(align_buf[0], mmio);
83 } else {
84 align_buf[0] = readw(mmio);
85 memcpy(trailing_buf, align_buf, 1);
86 }
87 }
88
89 udelay(100);
90 *data->cs0_cfg |= 0x01;
91}
92
93static void ixp4xx_irq_clear(struct ata_port *ap)
94{
95}
96
97static void ixp4xx_host_stop (struct ata_host *host)
98{
99 struct ixp4xx_pata_data *data = host->dev->platform_data;
100
101 iounmap(data->cs0);
102 iounmap(data->cs1);
103}
104
105static struct scsi_host_template ixp4xx_sht = {
106 .module = THIS_MODULE,
107 .name = DRV_NAME,
108 .ioctl = ata_scsi_ioctl,
109 .queuecommand = ata_scsi_queuecmd,
110 .can_queue = ATA_DEF_QUEUE,
111 .this_id = ATA_SHT_THIS_ID,
112 .sg_tablesize = LIBATA_MAX_PRD,
113 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
114 .emulated = ATA_SHT_EMULATED,
115 .use_clustering = ATA_SHT_USE_CLUSTERING,
116 .proc_name = DRV_NAME,
117 .dma_boundary = ATA_DMA_BOUNDARY,
118 .slave_configure = ata_scsi_slave_config,
119 .slave_destroy = ata_scsi_slave_destroy,
120 .bios_param = ata_std_bios_param,
121};
122
123static struct ata_port_operations ixp4xx_port_ops = {
124 .set_mode = ixp4xx_set_mode,
125 .mode_filter = ata_pci_default_filter,
126
127 .port_disable = ata_port_disable,
128 .tf_load = ata_tf_load,
129 .tf_read = ata_tf_read,
130 .check_status = ata_check_status,
131 .exec_command = ata_exec_command,
132 .dev_select = ata_std_dev_select,
133
134 .qc_prep = ata_qc_prep,
135 .qc_issue = ata_qc_issue_prot,
136 .eng_timeout = ata_eng_timeout,
137 .data_xfer = ixp4xx_mmio_data_xfer,
138
139 .irq_handler = ata_interrupt,
140 .irq_clear = ixp4xx_irq_clear,
141
142 .port_start = ata_port_start,
143 .port_stop = ata_port_stop,
144 .host_stop = ixp4xx_host_stop,
145
146 .phy_reset = ixp4xx_phy_reset,
147};
148
149static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
150 struct ixp4xx_pata_data *data)
151{
152 ioaddr->cmd_addr = (unsigned long) data->cs0;
153 ioaddr->altstatus_addr = (unsigned long) data->cs1 + 0x06;
154 ioaddr->ctl_addr = (unsigned long) data->cs1 + 0x06;
155
156 ata_std_ports(ioaddr);
157
158#ifndef __ARMEB__
159
160 /* adjust the addresses to handle the address swizzling of the
161 * ixp4xx in little endian mode.
162 */
163
164 ioaddr->data_addr ^= 0x02;
165 ioaddr->cmd_addr ^= 0x03;
166 ioaddr->altstatus_addr ^= 0x03;
167 ioaddr->ctl_addr ^= 0x03;
168 ioaddr->error_addr ^= 0x03;
169 ioaddr->feature_addr ^= 0x03;
170 ioaddr->nsect_addr ^= 0x03;
171 ioaddr->lbal_addr ^= 0x03;
172 ioaddr->lbam_addr ^= 0x03;
173 ioaddr->lbah_addr ^= 0x03;
174 ioaddr->device_addr ^= 0x03;
175 ioaddr->status_addr ^= 0x03;
176 ioaddr->command_addr ^= 0x03;
177#endif
178}
179
180static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
181{
182 int ret;
183 unsigned int irq;
184 struct resource *cs0, *cs1;
185 struct ata_probe_ent ae;
186
187 struct ixp4xx_pata_data *data = pdev->dev.platform_data;
188
189 cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
190 cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
191
192 if (!cs0 || !cs1)
193 return -EINVAL;
194
195 pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
196
197 data->cs0 = ioremap(cs0->start, 0x1000);
198 data->cs1 = ioremap(cs1->start, 0x1000);
199
200 irq = platform_get_irq(pdev, 0);
201 if (irq)
202 set_irq_type(irq, IRQT_HIGH);
203
204 /* Setup expansion bus chip selects */
205 *data->cs0_cfg = data->cs0_bits;
206 *data->cs1_cfg = data->cs1_bits;
207
208 memset(&ae, 0, sizeof(struct ata_probe_ent));
209 INIT_LIST_HEAD(&ae.node);
210
211 ae.dev = &pdev->dev;
212 ae.port_ops = &ixp4xx_port_ops;
213 ae.sht = &ixp4xx_sht;
214 ae.n_ports = 1;
215 ae.pio_mask = 0x1f; /* PIO4 */
216 ae.irq = irq;
217 ae.irq_flags = 0;
218 ae.port_flags = ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
219 | ATA_FLAG_NO_ATAPI | ATA_FLAG_SRST;
220
221 /* run in polling mode if no irq has been assigned */
222 if (!irq)
223 ae.port_flags |= ATA_FLAG_PIO_POLLING;
224
225 ixp4xx_setup_port(&ae.port[0], data);
226
227 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
228
229 ret = ata_device_add(&ae);
230 if (ret == 0)
231 return -ENODEV;
232
233 return 0;
234}
235
236static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
237{
238 struct ata_host *host = platform_get_drvdata(dev);
239
240 ata_host_remove(host);
241 platform_set_drvdata(dev, NULL);
242
243 return 0;
244}
245
246static struct platform_driver ixp4xx_pata_platform_driver = {
247 .driver = {
248 .name = DRV_NAME,
249 .owner = THIS_MODULE,
250 },
251 .probe = ixp4xx_pata_probe,
252 .remove = __devexit_p(ixp4xx_pata_remove),
253};
254
255static int __init ixp4xx_pata_init(void)
256{
257 return platform_driver_register(&ixp4xx_pata_platform_driver);
258}
259
260static void __exit ixp4xx_pata_exit(void)
261{
262 platform_driver_unregister(&ixp4xx_pata_platform_driver);
263}
264
265MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
266MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
267MODULE_LICENSE("GPL");
268MODULE_VERSION(DRV_VERSION);
269
270module_init(ixp4xx_pata_init);
271module_exit(ixp4xx_pata_exit);
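
The new ixp4xx driver's data_xfer routine above drops the expansion bus into 16-bit mode, pushes the buffer through the single data register a word at a time, and zero-pads a trailing odd byte into one last 16-bit write. A self-contained userspace sketch of just that word/trailing-byte split; fake_writew() and the log array are stand-ins for writew() to the CF data register:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Stand-in for writew() to the CF data register: just log each 16-bit write. */
static uint16_t reg_log[16];
static unsigned int reg_writes;

static void fake_writew(uint16_t val)
{
	reg_log[reg_writes++] = val;
}

/* Mirrors the write half of ixp4xx_mmio_data_xfer(): whole 16-bit words
 * first, then any trailing odd byte zero-padded into a final word. */
static void xfer_out(const unsigned char *buf, unsigned int buflen)
{
	unsigned int i, words = buflen >> 1;

	for (i = 0; i < words; i++) {
		uint16_t w;

		memcpy(&w, buf + 2 * i, 2);	/* avoids an unaligned deref */
		fake_writew(w);
	}

	if (buflen & 0x01) {
		uint16_t align_buf = 0;

		memcpy(&align_buf, buf + buflen - 1, 1);
		fake_writew(align_buf);
	}
}

int main(void)
{
	unsigned char data[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int i;

	xfer_out(data, sizeof(data));
	for (i = 0; i < reg_writes; i++)
		printf("write %u: 0x%04x\n", i, reg_log[i]);
	/* On a little-endian host: 0x2211, 0x4433, 0x0055 */
	return 0;
}

The real routine additionally toggles *data->cs0_cfg around the burst to switch the chip select between 8-bit and 16-bit mode, and handles the read direction symmetrically.
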
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 0210b10d49cd..2d661cb4df3c 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -19,7 +19,7 @@
19#include <linux/ata.h> 19#include <linux/ata.h>
20 20
21#define DRV_NAME "pata_jmicron" 21#define DRV_NAME "pata_jmicron"
22#define DRV_VERSION "0.1.2" 22#define DRV_VERSION "0.1.4"
23 23
24typedef enum { 24typedef enum {
25 PORT_PATA0 = 0, 25 PORT_PATA0 = 0,
@@ -128,8 +128,6 @@ static struct scsi_host_template jmicron_sht = {
128 .can_queue = ATA_DEF_QUEUE, 128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID, 129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD, 130 .sg_tablesize = LIBATA_MAX_PRD,
131 /* Special handling needed if you have sector or LBA48 limits */
132 .max_sectors = ATA_MAX_SECTORS,
133 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 131 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
134 .emulated = ATA_SHT_EMULATED, 132 .emulated = ATA_SHT_EMULATED,
135 .use_clustering = ATA_SHT_USE_CLUSTERING, 133 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -213,12 +211,11 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
213 211
214 /* FIXME: We may want a way to override this in future */ 212 /* FIXME: We may want a way to override this in future */
215 pci_write_config_byte(pdev, 0x41, 0xa1); 213 pci_write_config_byte(pdev, 0x41, 0xa1);
216 }
217
218 /* PATA controller is fn 1, AHCI is fn 0 */
219 if (PCI_FUNC(pdev->devfn) != 1)
220 return -ENODEV;
221 214
215 /* PATA controller is fn 1, AHCI is fn 0 */
216 if (PCI_FUNC(pdev->devfn) != 1)
217 return -ENODEV;
218 }
222 if ( id->driver_data == 365 || id->driver_data == 366) { 219 if ( id->driver_data == 365 || id->driver_data == 366) {
223 /* The 365/66 have two PATA channels, redirect the second */ 220 /* The 365/66 have two PATA channels, redirect the second */
224 pci_read_config_dword(pdev, 0x80, &reg); 221 pci_read_config_dword(pdev, 0x80, &reg);
@@ -229,6 +226,27 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
229 return ata_pci_init_one(pdev, port_info, 2); 226 return ata_pci_init_one(pdev, port_info, 2);
230} 227}
231 228
229static int jmicron_reinit_one(struct pci_dev *pdev)
230{
231 u32 reg;
232
233 switch(pdev->device) {
234 case PCI_DEVICE_ID_JMICRON_JMB368:
235 break;
236 case PCI_DEVICE_ID_JMICRON_JMB365:
237 case PCI_DEVICE_ID_JMICRON_JMB366:
238 /* Restore mapping or disks swap and boy does it get ugly */
239 pci_read_config_dword(pdev, 0x80, &reg);
240 reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
241 pci_write_config_dword(pdev, 0x80, reg);
242 /* Fall through */
243 default:
244 /* Make sure AHCI is turned back on */
245 pci_write_config_byte(pdev, 0x41, 0xa1);
246 }
247 return ata_pci_device_resume(pdev);
248}
249
232static const struct pci_device_id jmicron_pci_tbl[] = { 250static const struct pci_device_id jmicron_pci_tbl[] = {
233 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361}, 251 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361},
234 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363}, 252 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363},
@@ -244,6 +262,8 @@ static struct pci_driver jmicron_pci_driver = {
244 .id_table = jmicron_pci_tbl, 262 .id_table = jmicron_pci_tbl,
245 .probe = jmicron_init_one, 263 .probe = jmicron_init_one,
246 .remove = ata_pci_remove_one, 264 .remove = ata_pci_remove_one,
265 .suspend = ata_pci_device_suspend,
266 .resume = jmicron_reinit_one,
247}; 267};
248 268
249static int __init jmicron_init(void) 269static int __init jmicron_init(void)
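
jmicron_reinit_one above leans on a deliberate switch fall-through: on the JMB365/366 it first re-routes the second PATA channel (bit 24 of config dword 0x80), then falls into the default case that re-enables AHCI (config byte 0x41 = 0xa1), while the PATA-only JMB368 skips both writes. A tiny runnable sketch of what the bit-24 read-modify-write amounts to numerically; the starting value is an example only:

#include <stdio.h>

/* Set the "IDE1 to PATA IDE secondary" redirect bit, as the resume hook
 * does to the JMB365/366 config dword at offset 0x80. */
static unsigned int redirect_second_channel(unsigned int reg)
{
	return reg | (1u << 24);
}

int main(void)
{
	unsigned int before = 0x00000000;	/* example value only */
	unsigned int after = redirect_second_channel(before);

	printf("0x%08x -> 0x%08x\n", before, after);	/* -> 0x01000000 */
	return 0;
}
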
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index b39078b2a47b..c7d1738e4e69 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -128,7 +128,6 @@ static struct scsi_host_template legacy_sht = {
128 .can_queue = ATA_DEF_QUEUE, 128 .can_queue = ATA_DEF_QUEUE,
129 .this_id = ATA_SHT_THIS_ID, 129 .this_id = ATA_SHT_THIS_ID,
130 .sg_tablesize = LIBATA_MAX_PRD, 130 .sg_tablesize = LIBATA_MAX_PRD,
131 .max_sectors = ATA_MAX_SECTORS,
132 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 131 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
133 .emulated = ATA_SHT_EMULATED, 132 .emulated = ATA_SHT_EMULATED,
134 .use_clustering = ATA_SHT_USE_CLUSTERING, 133 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
new file mode 100644
index 000000000000..1c810ea00253
--- /dev/null
+++ b/drivers/ata/pata_marvell.c
@@ -0,0 +1,224 @@
1/*
2 * Marvell PATA driver.
3 *
4 * For the moment we drive the PATA port in legacy mode. That
5 * isn't making full use of the device functionality but it is
6 * easy to get working.
7 *
8 * (c) 2006 Red Hat <alan@redhat.com>
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/init.h>
15#include <linux/blkdev.h>
16#include <linux/delay.h>
17#include <linux/device.h>
18#include <scsi/scsi_host.h>
19#include <linux/libata.h>
20#include <linux/ata.h>
21
22#define DRV_NAME "pata_marvell"
23#define DRV_VERSION "0.1.1"
24
25/**
26 * marvell_pre_reset - check for 40/80 pin
27 * @ap: Port
28 *
29 * Perform the PATA port setup we need.
30 */
31
32static int marvell_pre_reset(struct ata_port *ap)
33{
34 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
35 u32 devices;
36 void __iomem *barp;
37 int i;
38
39 /* Check if our port is enabled */
40
41 barp = pci_iomap(pdev, 5, 0x10);
42 if (barp == NULL)
43 return -ENOMEM;
44 printk("BAR5:");
45 for(i = 0; i <= 0x0F; i++)
46 printk("%02X:%02X ", i, readb(barp + i));
47 printk("\n");
48
49 devices = readl(barp + 0x0C);
50 pci_iounmap(pdev, barp);
51
52 if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
53 (!(devices & 0x10))) /* PATA enable ? */
54 return -ENOENT;
55
56 /* Cable type */
57 switch(ap->port_no)
58 {
59 case 0:
60 if (inb(ap->ioaddr.bmdma_addr + 1) & 1)
61 ap->cbl = ATA_CBL_PATA40;
62 else
63 ap->cbl = ATA_CBL_PATA80;
64 break;
65
66 case 1: /* Legacy SATA port */
67 ap->cbl = ATA_CBL_SATA;
68 break;
69 }
70 return ata_std_prereset(ap);
71}
72
73/**
 74 * marvell_error_handler - Setup an error handler
75 * @ap: Port to handle
76 *
77 * LOCKING:
78 * None (inherited from caller).
79 */
80
81static void marvell_error_handler(struct ata_port *ap)
82{
83 return ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset,
84 NULL, ata_std_postreset);
85}
86
87/* No PIO or DMA methods needed for this device */
88
89static struct scsi_host_template marvell_sht = {
90 .module = THIS_MODULE,
91 .name = DRV_NAME,
92 .ioctl = ata_scsi_ioctl,
93 .queuecommand = ata_scsi_queuecmd,
94 .can_queue = ATA_DEF_QUEUE,
95 .this_id = ATA_SHT_THIS_ID,
96 .sg_tablesize = LIBATA_MAX_PRD,
97 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
98 .emulated = ATA_SHT_EMULATED,
99 .use_clustering = ATA_SHT_USE_CLUSTERING,
100 .proc_name = DRV_NAME,
101 .dma_boundary = ATA_DMA_BOUNDARY,
102 .slave_configure = ata_scsi_slave_config,
103 .slave_destroy = ata_scsi_slave_destroy,
104 /* Use standard CHS mapping rules */
105 .bios_param = ata_std_bios_param,
106 .resume = ata_scsi_device_resume,
107 .suspend = ata_scsi_device_suspend,
108};
109
110static const struct ata_port_operations marvell_ops = {
111 .port_disable = ata_port_disable,
112
113 /* Task file is PCI ATA format, use helpers */
114 .tf_load = ata_tf_load,
115 .tf_read = ata_tf_read,
116 .check_status = ata_check_status,
117 .exec_command = ata_exec_command,
118 .dev_select = ata_std_dev_select,
119
120 .freeze = ata_bmdma_freeze,
121 .thaw = ata_bmdma_thaw,
122 .error_handler = marvell_error_handler,
123 .post_internal_cmd = ata_bmdma_post_internal_cmd,
124
125 /* BMDMA handling is PCI ATA format, use helpers */
126 .bmdma_setup = ata_bmdma_setup,
127 .bmdma_start = ata_bmdma_start,
128 .bmdma_stop = ata_bmdma_stop,
129 .bmdma_status = ata_bmdma_status,
130 .qc_prep = ata_qc_prep,
131 .qc_issue = ata_qc_issue_prot,
132 .data_xfer = ata_pio_data_xfer,
133
134 /* Timeout handling */
135 .irq_handler = ata_interrupt,
136 .irq_clear = ata_bmdma_irq_clear,
137
138 /* Generic PATA PCI ATA helpers */
139 .port_start = ata_port_start,
140 .port_stop = ata_port_stop,
141 .host_stop = ata_host_stop,
142};
143
144
145/**
146 * marvell_init_one - Register Marvell ATA PCI device with kernel services
147 * @pdev: PCI device to register
148 * @ent: Entry in marvell_pci_tbl matching with @pdev
149 *
150 * Called from kernel PCI layer.
151 *
152 * LOCKING:
153 * Inherited from PCI layer (may sleep).
154 *
155 * RETURNS:
156 * Zero on success, or -ERRNO value.
157 */
158
159static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
160{
161 static struct ata_port_info info = {
162 .sht = &marvell_sht,
163 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
164
165 .pio_mask = 0x1f,
166 .mwdma_mask = 0x07,
167 .udma_mask = 0x3f,
168
169 .port_ops = &marvell_ops,
170 };
171 static struct ata_port_info info_sata = {
172 .sht = &marvell_sht,
173	/* Slave possible as it's magically mapped, not real */
174 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
175
176 .pio_mask = 0x1f,
177 .mwdma_mask = 0x07,
178 .udma_mask = 0x7f,
179
180 .port_ops = &marvell_ops,
181 };
182 struct ata_port_info *port_info[2] = { &info, &info_sata };
183 int n_port = 2;
184
185 if (pdev->device == 0x6101)
186 n_port = 1;
187
188 return ata_pci_init_one(pdev, port_info, n_port);
189}
190
191static const struct pci_device_id marvell_pci_tbl[] = {
192 { PCI_DEVICE(0x11AB, 0x6101), },
193 { PCI_DEVICE(0x11AB, 0x6145), },
194 { } /* terminate list */
195};
196
197static struct pci_driver marvell_pci_driver = {
198 .name = DRV_NAME,
199 .id_table = marvell_pci_tbl,
200 .probe = marvell_init_one,
201 .remove = ata_pci_remove_one,
202 .suspend = ata_pci_device_suspend,
203 .resume = ata_pci_device_resume,
204};
205
206static int __init marvell_init(void)
207{
208 return pci_register_driver(&marvell_pci_driver);
209}
210
211static void __exit marvell_exit(void)
212{
213 pci_unregister_driver(&marvell_pci_driver);
214}
215
216module_init(marvell_init);
217module_exit(marvell_exit);
218
219MODULE_AUTHOR("Alan Cox");
220MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
221MODULE_LICENSE("GPL");
222MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
223MODULE_VERSION(DRV_VERSION);
224
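
The cable detection buried in marvell_pre_reset above comes down to one vendor-specific bit: bit 0 of the byte at bmdma_addr + 1 reads 1 for a 40-wire cable and 0 for an 80-wire one, and port 1 is always the legacy SATA port. Isolated into its own helper, the decision looks roughly like this (a sketch that simply mirrors the driver's logic, assuming an already-initialised struct ata_port as marvell_pre_reset has):

#include <linux/libata.h>
#include <asm/io.h>

/* Sketch only: the same cable decision marvell_pre_reset() makes, separated
 * from the BAR5 port-enable check that surrounds it in the driver. */
static void marvell_cable_detect(struct ata_port *ap)
{
	switch (ap->port_no) {
	case 0:
		/* Vendor bit in the BMDMA block: 1 = 40-wire, 0 = 80-wire */
		if (inb(ap->ioaddr.bmdma_addr + 1) & 1)
			ap->cbl = ATA_CBL_PATA40;
		else
			ap->cbl = ATA_CBL_PATA80;
		break;
	case 1:
		/* The legacy SATA port has no PATA cable to probe */
		ap->cbl = ATA_CBL_SATA;
		break;
	}
}
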
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index e00d406bfdf5..4ccca938675e 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -35,7 +35,7 @@
35#include <linux/libata.h> 35#include <linux/libata.h>
36 36
37#define DRV_NAME "pata_mpiix" 37#define DRV_NAME "pata_mpiix"
38#define DRV_VERSION "0.7.2" 38#define DRV_VERSION "0.7.3"
39 39
40enum { 40enum {
41 IDETIM = 0x6C, /* IDE control register */ 41 IDETIM = 0x6C, /* IDE control register */
@@ -159,7 +159,6 @@ static struct scsi_host_template mpiix_sht = {
159 .can_queue = ATA_DEF_QUEUE, 159 .can_queue = ATA_DEF_QUEUE,
160 .this_id = ATA_SHT_THIS_ID, 160 .this_id = ATA_SHT_THIS_ID,
161 .sg_tablesize = LIBATA_MAX_PRD, 161 .sg_tablesize = LIBATA_MAX_PRD,
162 .max_sectors = ATA_MAX_SECTORS,
163 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 162 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
164 .emulated = ATA_SHT_EMULATED, 163 .emulated = ATA_SHT_EMULATED,
165 .use_clustering = ATA_SHT_USE_CLUSTERING, 164 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -168,6 +167,8 @@ static struct scsi_host_template mpiix_sht = {
168 .slave_configure = ata_scsi_slave_config, 167 .slave_configure = ata_scsi_slave_config,
169 .slave_destroy = ata_scsi_slave_destroy, 168 .slave_destroy = ata_scsi_slave_destroy,
170 .bios_param = ata_std_bios_param, 169 .bios_param = ata_std_bios_param,
170 .resume = ata_scsi_device_resume,
171 .suspend = ata_scsi_device_suspend,
171}; 172};
172 173
173static struct ata_port_operations mpiix_port_ops = { 174static struct ata_port_operations mpiix_port_ops = {
@@ -285,7 +286,9 @@ static struct pci_driver mpiix_pci_driver = {
285 .name = DRV_NAME, 286 .name = DRV_NAME,
286 .id_table = mpiix, 287 .id_table = mpiix,
287 .probe = mpiix_init_one, 288 .probe = mpiix_init_one,
288 .remove = mpiix_remove_one 289 .remove = mpiix_remove_one,
290 .suspend = ata_pci_device_suspend,
291 .resume = ata_pci_device_resume,
289}; 292};
290 293
291static int __init mpiix_init(void) 294static int __init mpiix_init(void)
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 1963a4d35873..cf7fe037471c 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -16,7 +16,7 @@
16#include <linux/ata.h> 16#include <linux/ata.h>
17 17
18#define DRV_NAME "pata_netcell" 18#define DRV_NAME "pata_netcell"
19#define DRV_VERSION "0.1.5" 19#define DRV_VERSION "0.1.6"
20 20
21/** 21/**
22 * netcell_probe_init - check for 40/80 pin 22 * netcell_probe_init - check for 40/80 pin
@@ -54,8 +54,6 @@ static struct scsi_host_template netcell_sht = {
54 .can_queue = ATA_DEF_QUEUE, 54 .can_queue = ATA_DEF_QUEUE,
55 .this_id = ATA_SHT_THIS_ID, 55 .this_id = ATA_SHT_THIS_ID,
56 .sg_tablesize = LIBATA_MAX_PRD, 56 .sg_tablesize = LIBATA_MAX_PRD,
57 /* Special handling needed if you have sector or LBA48 limits */
58 .max_sectors = ATA_MAX_SECTORS,
59 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 57 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
60 .emulated = ATA_SHT_EMULATED, 58 .emulated = ATA_SHT_EMULATED,
61 .use_clustering = ATA_SHT_USE_CLUSTERING, 59 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -65,6 +63,8 @@ static struct scsi_host_template netcell_sht = {
65 .slave_destroy = ata_scsi_slave_destroy, 63 .slave_destroy = ata_scsi_slave_destroy,
66 /* Use standard CHS mapping rules */ 64 /* Use standard CHS mapping rules */
67 .bios_param = ata_std_bios_param, 65 .bios_param = ata_std_bios_param,
66 .resume = ata_scsi_device_resume,
67 .suspend = ata_scsi_device_suspend,
68}; 68};
69 69
70static const struct ata_port_operations netcell_ops = { 70static const struct ata_port_operations netcell_ops = {
@@ -153,6 +153,8 @@ static struct pci_driver netcell_pci_driver = {
153 .id_table = netcell_pci_tbl, 153 .id_table = netcell_pci_tbl,
154 .probe = netcell_init_one, 154 .probe = netcell_init_one,
155 .remove = ata_pci_remove_one, 155 .remove = ata_pci_remove_one,
156 .suspend = ata_pci_device_suspend,
157 .resume = ata_pci_device_resume,
156}; 158};
157 159
158static int __init netcell_init(void) 160static int __init netcell_init(void)
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 7ec800f00ec8..c3032eb9010d 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -28,7 +28,7 @@
28#include <linux/libata.h> 28#include <linux/libata.h>
29 29
30#define DRV_NAME "pata_ns87410" 30#define DRV_NAME "pata_ns87410"
31#define DRV_VERSION "0.4.2" 31#define DRV_VERSION "0.4.3"
32 32
33/** 33/**
34 * ns87410_pre_reset - probe begin 34 * ns87410_pre_reset - probe begin
@@ -149,7 +149,6 @@ static struct scsi_host_template ns87410_sht = {
149 .can_queue = ATA_DEF_QUEUE, 149 .can_queue = ATA_DEF_QUEUE,
150 .this_id = ATA_SHT_THIS_ID, 150 .this_id = ATA_SHT_THIS_ID,
151 .sg_tablesize = LIBATA_MAX_PRD, 151 .sg_tablesize = LIBATA_MAX_PRD,
152 .max_sectors = ATA_MAX_SECTORS,
153 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 152 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
154 .emulated = ATA_SHT_EMULATED, 153 .emulated = ATA_SHT_EMULATED,
155 .use_clustering = ATA_SHT_USE_CLUSTERING, 154 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -158,6 +157,8 @@ static struct scsi_host_template ns87410_sht = {
158 .slave_configure = ata_scsi_slave_config, 157 .slave_configure = ata_scsi_slave_config,
159 .slave_destroy = ata_scsi_slave_destroy, 158 .slave_destroy = ata_scsi_slave_destroy,
160 .bios_param = ata_std_bios_param, 159 .bios_param = ata_std_bios_param,
160 .resume = ata_scsi_device_resume,
161 .suspend = ata_scsi_device_suspend,
161}; 162};
162 163
163static struct ata_port_operations ns87410_port_ops = { 164static struct ata_port_operations ns87410_port_ops = {
@@ -210,7 +211,9 @@ static struct pci_driver ns87410_pci_driver = {
210 .name = DRV_NAME, 211 .name = DRV_NAME,
211 .id_table = ns87410, 212 .id_table = ns87410,
212 .probe = ns87410_init_one, 213 .probe = ns87410_init_one,
213 .remove = ata_pci_remove_one 214 .remove = ata_pci_remove_one,
215 .suspend = ata_pci_device_suspend,
216 .resume = ata_pci_device_resume,
214}; 217};
215 218
216static int __init ns87410_init(void) 219static int __init ns87410_init(void)
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 8837256632e9..10ac3cc10181 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -224,7 +224,6 @@ static struct scsi_host_template oldpiix_sht = {
224 .can_queue = ATA_DEF_QUEUE, 224 .can_queue = ATA_DEF_QUEUE,
225 .this_id = ATA_SHT_THIS_ID, 225 .this_id = ATA_SHT_THIS_ID,
226 .sg_tablesize = LIBATA_MAX_PRD, 226 .sg_tablesize = LIBATA_MAX_PRD,
227 .max_sectors = ATA_MAX_SECTORS,
228 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 227 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
229 .emulated = ATA_SHT_EMULATED, 228 .emulated = ATA_SHT_EMULATED,
230 .use_clustering = ATA_SHT_USE_CLUSTERING, 229 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -233,6 +232,8 @@ static struct scsi_host_template oldpiix_sht = {
233 .slave_configure = ata_scsi_slave_config, 232 .slave_configure = ata_scsi_slave_config,
234 .slave_destroy = ata_scsi_slave_destroy, 233 .slave_destroy = ata_scsi_slave_destroy,
235 .bios_param = ata_std_bios_param, 234 .bios_param = ata_std_bios_param,
235 .resume = ata_scsi_device_resume,
236 .suspend = ata_scsi_device_suspend,
236}; 237};
237 238
238static const struct ata_port_operations oldpiix_pata_ops = { 239static const struct ata_port_operations oldpiix_pata_ops = {
@@ -314,6 +315,8 @@ static struct pci_driver oldpiix_pci_driver = {
314 .id_table = oldpiix_pci_tbl, 315 .id_table = oldpiix_pci_tbl,
315 .probe = oldpiix_init_one, 316 .probe = oldpiix_init_one,
316 .remove = ata_pci_remove_one, 317 .remove = ata_pci_remove_one,
318 .suspend = ata_pci_device_suspend,
319 .resume = ata_pci_device_resume,
317}; 320};
318 321
319static int __init oldpiix_init(void) 322static int __init oldpiix_init(void)
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index c6319cf50de4..c2988b0aa8ea 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -34,7 +34,7 @@
34#include <linux/libata.h> 34#include <linux/libata.h>
35 35
36#define DRV_NAME "pata_opti" 36#define DRV_NAME "pata_opti"
37#define DRV_VERSION "0.2.5" 37#define DRV_VERSION "0.2.7"
38 38
39enum { 39enum {
40 READ_REG = 0, /* index of Read cycle timing register */ 40 READ_REG = 0, /* index of Read cycle timing register */
@@ -109,30 +109,6 @@ static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
109 outb(0x83, regio + 2); 109 outb(0x83, regio + 2);
110} 110}
111 111
112#if 0
113/**
114 * opti_read_reg - control register read
115 * @ap: ATA port
116 * @reg: control register number
117 *
118 * The Opti uses magic 'trapdoor' register accesses to do configuration
119 * rather than using PCI space as other controllers do. The double inw
120 * on the error register activates configuration mode. We can then read
121 * the control register
122 */
123
124static u8 opti_read_reg(struct ata_port *ap, int reg)
125{
126 unsigned long regio = ap->ioaddr.cmd_addr;
127 u8 ret;
128 inw(regio + 1);
129 inw(regio + 1);
130 outb(3, regio + 2);
131 ret = inb(regio + reg);
132 outb(0x83, regio + 2);
133}
134#endif
135
136/** 112/**
137 * opti_set_piomode - set initial PIO mode data 113 * opti_set_piomode - set initial PIO mode data
138 * @ap: ATA interface 114 * @ap: ATA interface
@@ -195,7 +171,6 @@ static struct scsi_host_template opti_sht = {
195 .can_queue = ATA_DEF_QUEUE, 171 .can_queue = ATA_DEF_QUEUE,
196 .this_id = ATA_SHT_THIS_ID, 172 .this_id = ATA_SHT_THIS_ID,
197 .sg_tablesize = LIBATA_MAX_PRD, 173 .sg_tablesize = LIBATA_MAX_PRD,
198 .max_sectors = ATA_MAX_SECTORS,
199 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 174 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
200 .emulated = ATA_SHT_EMULATED, 175 .emulated = ATA_SHT_EMULATED,
201 .use_clustering = ATA_SHT_USE_CLUSTERING, 176 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -204,12 +179,13 @@ static struct scsi_host_template opti_sht = {
204 .slave_configure = ata_scsi_slave_config, 179 .slave_configure = ata_scsi_slave_config,
205 .slave_destroy = ata_scsi_slave_destroy, 180 .slave_destroy = ata_scsi_slave_destroy,
206 .bios_param = ata_std_bios_param, 181 .bios_param = ata_std_bios_param,
182 .resume = ata_scsi_device_resume,
183 .suspend = ata_scsi_device_suspend,
207}; 184};
208 185
209static struct ata_port_operations opti_port_ops = { 186static struct ata_port_operations opti_port_ops = {
210 .port_disable = ata_port_disable, 187 .port_disable = ata_port_disable,
211 .set_piomode = opti_set_piomode, 188 .set_piomode = opti_set_piomode,
212/* .set_dmamode = opti_set_dmamode, */
213 .tf_load = ata_tf_load, 189 .tf_load = ata_tf_load,
214 .tf_read = ata_tf_read, 190 .tf_read = ata_tf_read,
215 .check_status = ata_check_status, 191 .check_status = ata_check_status,
@@ -267,7 +243,9 @@ static struct pci_driver opti_pci_driver = {
267 .name = DRV_NAME, 243 .name = DRV_NAME,
268 .id_table = opti, 244 .id_table = opti,
269 .probe = opti_init_one, 245 .probe = opti_init_one,
270 .remove = ata_pci_remove_one 246 .remove = ata_pci_remove_one,
247 .suspend = ata_pci_device_suspend,
248 .resume = ata_pci_device_resume,
271}; 249};
272 250
273static int __init opti_init(void) 251static int __init opti_init(void)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 2f4770cce04e..80d111c569dc 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -33,7 +33,7 @@
33#include <linux/libata.h> 33#include <linux/libata.h>
34 34
35#define DRV_NAME "pata_optidma" 35#define DRV_NAME "pata_optidma"
36#define DRV_VERSION "0.2.2" 36#define DRV_VERSION "0.2.3"
37 37
38enum { 38enum {
39 READ_REG = 0, /* index of Read cycle timing register */ 39 READ_REG = 0, /* index of Read cycle timing register */
@@ -352,7 +352,6 @@ static struct scsi_host_template optidma_sht = {
352 .can_queue = ATA_DEF_QUEUE, 352 .can_queue = ATA_DEF_QUEUE,
353 .this_id = ATA_SHT_THIS_ID, 353 .this_id = ATA_SHT_THIS_ID,
354 .sg_tablesize = LIBATA_MAX_PRD, 354 .sg_tablesize = LIBATA_MAX_PRD,
355 .max_sectors = ATA_MAX_SECTORS,
356 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 355 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
357 .emulated = ATA_SHT_EMULATED, 356 .emulated = ATA_SHT_EMULATED,
358 .use_clustering = ATA_SHT_USE_CLUSTERING, 357 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -361,6 +360,8 @@ static struct scsi_host_template optidma_sht = {
361 .slave_configure = ata_scsi_slave_config, 360 .slave_configure = ata_scsi_slave_config,
362 .slave_destroy = ata_scsi_slave_destroy, 361 .slave_destroy = ata_scsi_slave_destroy,
363 .bios_param = ata_std_bios_param, 362 .bios_param = ata_std_bios_param,
363 .resume = ata_scsi_device_resume,
364 .suspend = ata_scsi_device_suspend,
364}; 365};
365 366
366static struct ata_port_operations optidma_port_ops = { 367static struct ata_port_operations optidma_port_ops = {
@@ -522,7 +523,9 @@ static struct pci_driver optidma_pci_driver = {
522 .name = DRV_NAME, 523 .name = DRV_NAME,
523 .id_table = optidma, 524 .id_table = optidma,
524 .probe = optidma_init_one, 525 .probe = optidma_init_one,
525 .remove = ata_pci_remove_one 526 .remove = ata_pci_remove_one,
527 .suspend = ata_pci_device_suspend,
528 .resume = ata_pci_device_resume,
526}; 529};
527 530
528static int __init optidma_init(void) 531static int __init optidma_init(void)
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 999922de476e..4ca6fa5dcb42 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -62,7 +62,6 @@ static struct scsi_host_template pcmcia_sht = {
62 .can_queue = ATA_DEF_QUEUE, 62 .can_queue = ATA_DEF_QUEUE,
63 .this_id = ATA_SHT_THIS_ID, 63 .this_id = ATA_SHT_THIS_ID,
64 .sg_tablesize = LIBATA_MAX_PRD, 64 .sg_tablesize = LIBATA_MAX_PRD,
65 .max_sectors = ATA_MAX_SECTORS,
66 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 65 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
67 .emulated = ATA_SHT_EMULATED, 66 .emulated = ATA_SHT_EMULATED,
68 .use_clustering = ATA_SHT_USE_CLUSTERING, 67 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index beb6d10a234b..76dd1c935dbd 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -134,7 +134,6 @@ static struct scsi_host_template pdc2027x_sht = {
134 .can_queue = ATA_DEF_QUEUE, 134 .can_queue = ATA_DEF_QUEUE,
135 .this_id = ATA_SHT_THIS_ID, 135 .this_id = ATA_SHT_THIS_ID,
136 .sg_tablesize = LIBATA_MAX_PRD, 136 .sg_tablesize = LIBATA_MAX_PRD,
137 .max_sectors = ATA_MAX_SECTORS,
138 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 137 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
139 .emulated = ATA_SHT_EMULATED, 138 .emulated = ATA_SHT_EMULATED,
140 .use_clustering = ATA_SHT_USE_CLUSTERING, 139 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -854,7 +853,7 @@ static void __devexit pdc2027x_remove_one(struct pci_dev *pdev)
854 */ 853 */
855static int __init pdc2027x_init(void) 854static int __init pdc2027x_init(void)
856{ 855{
857 return pci_module_init(&pdc2027x_pci_driver); 856 return pci_register_driver(&pdc2027x_pci_driver);
858} 857}
859 858
860/** 859/**
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 6baf51b2fda1..ad691b9e7743 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -21,7 +21,7 @@
21#include <linux/libata.h> 21#include <linux/libata.h>
22 22
23#define DRV_NAME "pata_pdc202xx_old" 23#define DRV_NAME "pata_pdc202xx_old"
24#define DRV_VERSION "0.2.1" 24#define DRV_VERSION "0.2.3"
25 25
26/** 26/**
27 * pdc2024x_pre_reset - probe begin 27 * pdc2024x_pre_reset - probe begin
@@ -63,7 +63,7 @@ static void pdc2026x_error_handler(struct ata_port *ap)
63} 63}
64 64
65/** 65/**
66 * pdc_configure_piomode - set chip PIO timing 66 * pdc202xx_configure_piomode - set chip PIO timing
67 * @ap: ATA interface 67 * @ap: ATA interface
68 * @adev: ATA device 68 * @adev: ATA device
69 * @pio: PIO mode 69 * @pio: PIO mode
@@ -73,7 +73,7 @@ static void pdc2026x_error_handler(struct ata_port *ap)
73 * versa 73 * versa
74 */ 74 */
75 75
76static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio) 76static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
77{ 77{
78 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 78 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
79 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; 79 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
@@ -98,7 +98,7 @@ static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev,
98} 98}
99 99
100/** 100/**
101 * pdc_set_piomode - set initial PIO mode data 101 * pdc202xx_set_piomode - set initial PIO mode data
102 * @ap: ATA interface 102 * @ap: ATA interface
103 * @adev: ATA device 103 * @adev: ATA device
104 * 104 *
@@ -106,13 +106,13 @@ static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev,
106 * but we want to set the PIO timing by default. 106 * but we want to set the PIO timing by default.
107 */ 107 */
108 108
109static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev) 109static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
110{ 110{
111 pdc_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); 111 pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
112} 112}
113 113
114/** 114/**
115 * pdc_configure_dmamode - set DMA mode in chip 115 * pdc202xx_configure_dmamode - set DMA mode in chip
116 * @ap: ATA interface 116 * @ap: ATA interface
117 * @adev: ATA device 117 * @adev: ATA device
118 * 118 *
@@ -120,7 +120,7 @@ static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
120 * to occur. 120 * to occur.
121 */ 121 */
122 122
123static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev) 123static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
124{ 124{
125 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 125 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
126 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno; 126 int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
@@ -184,7 +184,7 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
184 184
185 /* The DMA clocks may have been trashed by a reset. FIXME: make conditional 185 /* The DMA clocks may have been trashed by a reset. FIXME: make conditional
186 and move to qc_issue ? */ 186 and move to qc_issue ? */
187 pdc_set_dmamode(ap, qc->dev); 187 pdc202xx_set_dmamode(ap, qc->dev);
188 188
189 /* Cases the state machine will not complete correctly without help */ 189 /* Cases the state machine will not complete correctly without help */
190 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA) 190 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA)
@@ -254,7 +254,7 @@ static void pdc2026x_dev_config(struct ata_port *ap, struct ata_device *adev)
254 adev->max_sectors = 256; 254 adev->max_sectors = 256;
255} 255}
256 256
257static struct scsi_host_template pdc_sht = { 257static struct scsi_host_template pdc202xx_sht = {
258 .module = THIS_MODULE, 258 .module = THIS_MODULE,
259 .name = DRV_NAME, 259 .name = DRV_NAME,
260 .ioctl = ata_scsi_ioctl, 260 .ioctl = ata_scsi_ioctl,
@@ -262,7 +262,6 @@ static struct scsi_host_template pdc_sht = {
262 .can_queue = ATA_DEF_QUEUE, 262 .can_queue = ATA_DEF_QUEUE,
263 .this_id = ATA_SHT_THIS_ID, 263 .this_id = ATA_SHT_THIS_ID,
264 .sg_tablesize = LIBATA_MAX_PRD, 264 .sg_tablesize = LIBATA_MAX_PRD,
265 .max_sectors = ATA_MAX_SECTORS,
266 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 265 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
267 .emulated = ATA_SHT_EMULATED, 266 .emulated = ATA_SHT_EMULATED,
268 .use_clustering = ATA_SHT_USE_CLUSTERING, 267 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -271,12 +270,14 @@ static struct scsi_host_template pdc_sht = {
271 .slave_configure = ata_scsi_slave_config, 270 .slave_configure = ata_scsi_slave_config,
272 .slave_destroy = ata_scsi_slave_destroy, 271 .slave_destroy = ata_scsi_slave_destroy,
273 .bios_param = ata_std_bios_param, 272 .bios_param = ata_std_bios_param,
273 .resume = ata_scsi_device_resume,
274 .suspend = ata_scsi_device_suspend,
274}; 275};
275 276
276static struct ata_port_operations pdc2024x_port_ops = { 277static struct ata_port_operations pdc2024x_port_ops = {
277 .port_disable = ata_port_disable, 278 .port_disable = ata_port_disable,
278 .set_piomode = pdc_set_piomode, 279 .set_piomode = pdc202xx_set_piomode,
279 .set_dmamode = pdc_set_dmamode, 280 .set_dmamode = pdc202xx_set_dmamode,
280 .mode_filter = ata_pci_default_filter, 281 .mode_filter = ata_pci_default_filter,
281 .tf_load = ata_tf_load, 282 .tf_load = ata_tf_load,
282 .tf_read = ata_tf_read, 283 .tf_read = ata_tf_read,
@@ -308,8 +309,8 @@ static struct ata_port_operations pdc2024x_port_ops = {
308 309
309static struct ata_port_operations pdc2026x_port_ops = { 310static struct ata_port_operations pdc2026x_port_ops = {
310 .port_disable = ata_port_disable, 311 .port_disable = ata_port_disable,
311 .set_piomode = pdc_set_piomode, 312 .set_piomode = pdc202xx_set_piomode,
312 .set_dmamode = pdc_set_dmamode, 313 .set_dmamode = pdc202xx_set_dmamode,
313 .mode_filter = ata_pci_default_filter, 314 .mode_filter = ata_pci_default_filter,
314 .tf_load = ata_tf_load, 315 .tf_load = ata_tf_load,
315 .tf_read = ata_tf_read, 316 .tf_read = ata_tf_read,
@@ -340,11 +341,11 @@ static struct ata_port_operations pdc2026x_port_ops = {
340 .host_stop = ata_host_stop 341 .host_stop = ata_host_stop
341}; 342};
342 343
343static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id) 344static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
344{ 345{
345 static struct ata_port_info info[3] = { 346 static struct ata_port_info info[3] = {
346 { 347 {
347 .sht = &pdc_sht, 348 .sht = &pdc202xx_sht,
348 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 349 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
349 .pio_mask = 0x1f, 350 .pio_mask = 0x1f,
350 .mwdma_mask = 0x07, 351 .mwdma_mask = 0x07,
@@ -352,7 +353,7 @@ static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
352 .port_ops = &pdc2024x_port_ops 353 .port_ops = &pdc2024x_port_ops
353 }, 354 },
354 { 355 {
355 .sht = &pdc_sht, 356 .sht = &pdc202xx_sht,
356 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 357 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
357 .pio_mask = 0x1f, 358 .pio_mask = 0x1f,
358 .mwdma_mask = 0x07, 359 .mwdma_mask = 0x07,
@@ -360,7 +361,7 @@ static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
360 .port_ops = &pdc2026x_port_ops 361 .port_ops = &pdc2026x_port_ops
361 }, 362 },
362 { 363 {
363 .sht = &pdc_sht, 364 .sht = &pdc202xx_sht,
364 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 365 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
365 .pio_mask = 0x1f, 366 .pio_mask = 0x1f,
366 .mwdma_mask = 0x07, 367 .mwdma_mask = 0x07,
@@ -386,7 +387,7 @@ static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
386 return ata_pci_init_one(dev, port_info, 2); 387 return ata_pci_init_one(dev, port_info, 2);
387} 388}
388 389
389static const struct pci_device_id pdc[] = { 390static const struct pci_device_id pdc202xx[] = {
390 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 }, 391 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
391 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 }, 392 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
392 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 }, 393 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
@@ -396,28 +397,30 @@ static const struct pci_device_id pdc[] = {
396 { }, 397 { },
397}; 398};
398 399
399static struct pci_driver pdc_pci_driver = { 400static struct pci_driver pdc202xx_pci_driver = {
400 .name = DRV_NAME, 401 .name = DRV_NAME,
401 .id_table = pdc, 402 .id_table = pdc202xx,
402 .probe = pdc_init_one, 403 .probe = pdc202xx_init_one,
403 .remove = ata_pci_remove_one 404 .remove = ata_pci_remove_one,
405 .suspend = ata_pci_device_suspend,
406 .resume = ata_pci_device_resume,
404}; 407};
405 408
406static int __init pdc_init(void) 409static int __init pdc202xx_init(void)
407{ 410{
408 return pci_register_driver(&pdc_pci_driver); 411 return pci_register_driver(&pdc202xx_pci_driver);
409} 412}
410 413
411static void __exit pdc_exit(void) 414static void __exit pdc202xx_exit(void)
412{ 415{
413 pci_unregister_driver(&pdc_pci_driver); 416 pci_unregister_driver(&pdc202xx_pci_driver);
414} 417}
415 418
416MODULE_AUTHOR("Alan Cox"); 419MODULE_AUTHOR("Alan Cox");
417MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267"); 420MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
418MODULE_LICENSE("GPL"); 421MODULE_LICENSE("GPL");
419MODULE_DEVICE_TABLE(pci, pdc); 422MODULE_DEVICE_TABLE(pci, pdc202xx);
420MODULE_VERSION(DRV_VERSION); 423MODULE_VERSION(DRV_VERSION);
421 424
422module_init(pdc_init); 425module_init(pdc202xx_init);
423module_exit(pdc_exit); 426module_exit(pdc202xx_exit);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
new file mode 100644
index 000000000000..443b1d85c6c4
--- /dev/null
+++ b/drivers/ata/pata_platform.c
@@ -0,0 +1,295 @@
1/*
2 * Generic platform device PATA driver
3 *
4 * Copyright (C) 2006 Paul Mundt
5 *
6 * Based on pata_pcmcia:
7 *
8 * Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
12 * for more details.
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/blkdev.h>
18#include <scsi/scsi_host.h>
19#include <linux/ata.h>
20#include <linux/libata.h>
21#include <linux/platform_device.h>
22#include <linux/pata_platform.h>
23
24#define DRV_NAME "pata_platform"
25#define DRV_VERSION "0.1.2"
26
27static int pio_mask = 1;
28
29/*
30 * Provide our own set_mode() as we don't want to change anything that has
31 * already been configured..
32 */
33static void pata_platform_set_mode(struct ata_port *ap)
34{
35 int i;
36
37 for (i = 0; i < ATA_MAX_DEVICES; i++) {
38 struct ata_device *dev = &ap->device[i];
39
40 if (ata_dev_enabled(dev)) {
41 /* We don't really care */
42 dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
43 dev->xfer_shift = ATA_SHIFT_PIO;
44 dev->flags |= ATA_DFLAG_PIO;
45 }
46 }
47}
48
49static void pata_platform_host_stop(struct ata_host *host)
50{
51 int i;
52
53 /*
54 * Unmap the bases for MMIO
55 */
56 for (i = 0; i < host->n_ports; i++) {
57 struct ata_port *ap = host->ports[i];
58
59 if (ap->flags & ATA_FLAG_MMIO) {
60 iounmap((void __iomem *)ap->ioaddr.ctl_addr);
61 iounmap((void __iomem *)ap->ioaddr.cmd_addr);
62 }
63 }
64}
65
66static struct scsi_host_template pata_platform_sht = {
67 .module = THIS_MODULE,
68 .name = DRV_NAME,
69 .ioctl = ata_scsi_ioctl,
70 .queuecommand = ata_scsi_queuecmd,
71 .can_queue = ATA_DEF_QUEUE,
72 .this_id = ATA_SHT_THIS_ID,
73 .sg_tablesize = LIBATA_MAX_PRD,
74 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
75 .emulated = ATA_SHT_EMULATED,
76 .use_clustering = ATA_SHT_USE_CLUSTERING,
77 .proc_name = DRV_NAME,
78 .dma_boundary = ATA_DMA_BOUNDARY,
79 .slave_configure = ata_scsi_slave_config,
80 .slave_destroy = ata_scsi_slave_destroy,
81 .bios_param = ata_std_bios_param,
82};
83
84static struct ata_port_operations pata_platform_port_ops = {
85 .set_mode = pata_platform_set_mode,
86
87 .port_disable = ata_port_disable,
88 .tf_load = ata_tf_load,
89 .tf_read = ata_tf_read,
90 .check_status = ata_check_status,
91 .exec_command = ata_exec_command,
92 .dev_select = ata_std_dev_select,
93
94 .freeze = ata_bmdma_freeze,
95 .thaw = ata_bmdma_thaw,
96 .error_handler = ata_bmdma_error_handler,
97 .post_internal_cmd = ata_bmdma_post_internal_cmd,
98
99 .qc_prep = ata_qc_prep,
100 .qc_issue = ata_qc_issue_prot,
101
102 .data_xfer = ata_pio_data_xfer_noirq,
103
104 .irq_handler = ata_interrupt,
105 .irq_clear = ata_bmdma_irq_clear,
106
107 .port_start = ata_port_start,
108 .port_stop = ata_port_stop,
109 .host_stop = pata_platform_host_stop
110};
111
112static void pata_platform_setup_port(struct ata_ioports *ioaddr,
113 struct pata_platform_info *info)
114{
115 unsigned int shift = 0;
116
117 /* Fixup the port shift for platforms that need it */
118 if (info && info->ioport_shift)
119 shift = info->ioport_shift;
120
121 ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << shift);
122 ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << shift);
123 ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
124 ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << shift);
125 ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << shift);
126 ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << shift);
127 ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << shift);
128 ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << shift);
129 ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << shift);
130 ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << shift);
131}
132
133/**
134 * pata_platform_probe - attach a platform interface
135 * @pdev: platform device
136 *
137 * Register a platform bus IDE interface. Such interfaces are PIO and we
138 * assume they do not support IRQ sharing.
139 *
140 * Platform devices are expected to contain 3 resources per port:
141 *
142 * - I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
143 * - CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
144 * - IRQ (IORESOURCE_IRQ)
145 *
146 * If the base resources are both mem types, the ioremap() is handled
147 * here. For IORESOURCE_IO, it's assumed that there's no remapping
148 * necessary.
149 */
150static int __devinit pata_platform_probe(struct platform_device *pdev)
151{
152 struct resource *io_res, *ctl_res;
153 struct ata_probe_ent ae;
154 unsigned int mmio;
155 int ret;
156
157 /*
158 * Simple resource validation ..
159 */
160 if (unlikely(pdev->num_resources != 3)) {
161 dev_err(&pdev->dev, "invalid number of resources\n");
162 return -EINVAL;
163 }
164
165 /*
166 * Get the I/O base first
167 */
168 io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
169 if (io_res == NULL) {
170 io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
171 if (unlikely(io_res == NULL))
172 return -EINVAL;
173 }
174
175 /*
176 * Then the CTL base
177 */
178 ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
179 if (ctl_res == NULL) {
180 ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
181 if (unlikely(ctl_res == NULL))
182 return -EINVAL;
183 }
184
185 /*
186 * Check for MMIO
187 */
188 mmio = (( io_res->flags == IORESOURCE_MEM) &&
189 (ctl_res->flags == IORESOURCE_MEM));
190
191 /*
192 * Now that that's out of the way, wire up the port..
193 */
194 memset(&ae, 0, sizeof(struct ata_probe_ent));
195 INIT_LIST_HEAD(&ae.node);
196 ae.dev = &pdev->dev;
197 ae.port_ops = &pata_platform_port_ops;
198 ae.sht = &pata_platform_sht;
199 ae.n_ports = 1;
200 ae.pio_mask = pio_mask;
201 ae.irq = platform_get_irq(pdev, 0);
202 ae.irq_flags = 0;
203 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
204
205 /*
206 * Handle the MMIO case
207 */
208 if (mmio) {
209 ae.port_flags |= ATA_FLAG_MMIO;
210
211 ae.port[0].cmd_addr = (unsigned long)ioremap(io_res->start,
212 io_res->end - io_res->start + 1);
213 if (unlikely(!ae.port[0].cmd_addr)) {
214 dev_err(&pdev->dev, "failed to remap IO base\n");
215 return -ENXIO;
216 }
217
218 ae.port[0].ctl_addr = (unsigned long)ioremap(ctl_res->start,
219 ctl_res->end - ctl_res->start + 1);
220 if (unlikely(!ae.port[0].ctl_addr)) {
221 dev_err(&pdev->dev, "failed to remap CTL base\n");
222 ret = -ENXIO;
223 goto bad_remap;
224 }
225 } else {
226 ae.port[0].cmd_addr = io_res->start;
227 ae.port[0].ctl_addr = ctl_res->start;
228 }
229
230 ae.port[0].altstatus_addr = ae.port[0].ctl_addr;
231
232 pata_platform_setup_port(&ae.port[0], pdev->dev.platform_data);
233
234 if (unlikely(ata_device_add(&ae) == 0)) {
235 ret = -ENODEV;
236 goto add_failed;
237 }
238
239 return 0;
240
241add_failed:
242 if (ae.port[0].ctl_addr && mmio)
243 iounmap((void __iomem *)ae.port[0].ctl_addr);
244bad_remap:
245 if (ae.port[0].cmd_addr && mmio)
246 iounmap((void __iomem *)ae.port[0].cmd_addr);
247
248 return ret;
249}
250
251/**
252 * pata_platform_remove - unplug a platform interface
253 * @pdev: platform device
254 *
255 * A platform bus ATA device has been unplugged. Perform the needed
256 * cleanup. Also called on module unload for any active devices.
257 */
258static int __devexit pata_platform_remove(struct platform_device *pdev)
259{
260 struct device *dev = &pdev->dev;
261 struct ata_host *host = dev_get_drvdata(dev);
262
263 ata_host_remove(host);
264 dev_set_drvdata(dev, NULL);
265
266 return 0;
267}
268
269static struct platform_driver pata_platform_driver = {
270 .probe = pata_platform_probe,
271 .remove = __devexit_p(pata_platform_remove),
272 .driver = {
273 .name = DRV_NAME,
274 .owner = THIS_MODULE,
275 },
276};
277
278static int __init pata_platform_init(void)
279{
280 return platform_driver_register(&pata_platform_driver);
281}
282
283static void __exit pata_platform_exit(void)
284{
285 platform_driver_unregister(&pata_platform_driver);
286}
287module_init(pata_platform_init);
288module_exit(pata_platform_exit);
289
290module_param(pio_mask, int, 0);
291
292MODULE_AUTHOR("Paul Mundt");
293MODULE_DESCRIPTION("low-level driver for platform device ATA");
294MODULE_LICENSE("GPL");
295MODULE_VERSION(DRV_VERSION);
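
pata_platform_setup_port above derives every taskfile register address from cmd_addr plus the standard ATA register index, scaled by an optional board-specific ioport_shift for platforms whose IDE registers sit on wider address strides. A self-contained sketch of that layout arithmetic; the indices below are the conventional taskfile offsets (what <linux/ata.h> calls ATA_REG_*), hard-coded so the sketch runs in userspace, and the base address is an example only:

#include <stdio.h>

/* Conventional ATA taskfile register indices (offsets from the command
 * block base). */
enum {
	REG_DATA = 0, REG_ERR = 1, REG_NSECT = 2, REG_LBAL = 3,
	REG_LBAM = 4, REG_LBAH = 5, REG_DEVICE = 6, REG_STATUS = 7,
};

/* Same arithmetic as pata_platform_setup_port(): each register lives at
 * cmd_addr + (index << ioport_shift). */
static unsigned long reg_addr(unsigned long cmd_addr, int index, int shift)
{
	return cmd_addr + ((unsigned long)index << shift);
}

int main(void)
{
	unsigned long cmd_addr = 0x400;		/* example base only */
	int shift;

	for (shift = 0; shift <= 2; shift++)
		printf("shift %d: data @ 0x%lx, nsect @ 0x%lx, status @ 0x%lx\n",
		       shift,
		       reg_addr(cmd_addr, REG_DATA, shift),
		       reg_addr(cmd_addr, REG_NSECT, shift),
		       reg_addr(cmd_addr, REG_STATUS, shift));
	return 0;
}
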
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 314938dea1fc..36f621abc390 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -157,7 +157,6 @@ static struct scsi_host_template qdi_sht = {
157 .can_queue = ATA_DEF_QUEUE, 157 .can_queue = ATA_DEF_QUEUE,
158 .this_id = ATA_SHT_THIS_ID, 158 .this_id = ATA_SHT_THIS_ID,
159 .sg_tablesize = LIBATA_MAX_PRD, 159 .sg_tablesize = LIBATA_MAX_PRD,
160 .max_sectors = ATA_MAX_SECTORS,
161 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 160 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
162 .emulated = ATA_SHT_EMULATED, 161 .emulated = ATA_SHT_EMULATED,
163 .use_clustering = ATA_SHT_USE_CLUSTERING, 162 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 048c2bb21ef1..065541d034ad 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -220,7 +220,6 @@ static struct scsi_host_template radisys_sht = {
220 .can_queue = ATA_DEF_QUEUE, 220 .can_queue = ATA_DEF_QUEUE,
221 .this_id = ATA_SHT_THIS_ID, 221 .this_id = ATA_SHT_THIS_ID,
222 .sg_tablesize = LIBATA_MAX_PRD, 222 .sg_tablesize = LIBATA_MAX_PRD,
223 .max_sectors = ATA_MAX_SECTORS,
224 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 223 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
225 .emulated = ATA_SHT_EMULATED, 224 .emulated = ATA_SHT_EMULATED,
226 .use_clustering = ATA_SHT_USE_CLUSTERING, 225 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -229,6 +228,8 @@ static struct scsi_host_template radisys_sht = {
229 .slave_configure = ata_scsi_slave_config, 228 .slave_configure = ata_scsi_slave_config,
230 .slave_destroy = ata_scsi_slave_destroy, 229 .slave_destroy = ata_scsi_slave_destroy,
231 .bios_param = ata_std_bios_param, 230 .bios_param = ata_std_bios_param,
231 .resume = ata_scsi_device_resume,
232 .suspend = ata_scsi_device_suspend,
232}; 233};
233 234
234static const struct ata_port_operations radisys_pata_ops = { 235static const struct ata_port_operations radisys_pata_ops = {
@@ -311,6 +312,8 @@ static struct pci_driver radisys_pci_driver = {
311 .id_table = radisys_pci_tbl, 312 .id_table = radisys_pci_tbl,
312 .probe = radisys_init_one, 313 .probe = radisys_init_one,
313 .remove = ata_pci_remove_one, 314 .remove = ata_pci_remove_one,
315 .suspend = ata_pci_device_suspend,
316 .resume = ata_pci_device_resume,
314}; 317};
315 318
316static int __init radisys_init(void) 319static int __init radisys_init(void)
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index e4e5ea423fef..3677c642c9f9 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -21,7 +21,7 @@
21#include <linux/libata.h> 21#include <linux/libata.h>
22 22
23#define DRV_NAME "pata_rz1000" 23#define DRV_NAME "pata_rz1000"
24#define DRV_VERSION "0.2.2" 24#define DRV_VERSION "0.2.3"
25 25
26 26
27/** 27/**
@@ -83,7 +83,6 @@ static struct scsi_host_template rz1000_sht = {
83 .can_queue = ATA_DEF_QUEUE, 83 .can_queue = ATA_DEF_QUEUE,
84 .this_id = ATA_SHT_THIS_ID, 84 .this_id = ATA_SHT_THIS_ID,
85 .sg_tablesize = LIBATA_MAX_PRD, 85 .sg_tablesize = LIBATA_MAX_PRD,
86 .max_sectors = ATA_MAX_SECTORS,
87 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 86 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
88 .emulated = ATA_SHT_EMULATED, 87 .emulated = ATA_SHT_EMULATED,
89 .use_clustering = ATA_SHT_USE_CLUSTERING, 88 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -92,6 +91,8 @@ static struct scsi_host_template rz1000_sht = {
92 .slave_configure = ata_scsi_slave_config, 91 .slave_configure = ata_scsi_slave_config,
93 .slave_destroy = ata_scsi_slave_destroy, 92 .slave_destroy = ata_scsi_slave_destroy,
94 .bios_param = ata_std_bios_param, 93 .bios_param = ata_std_bios_param,
94 .resume = ata_scsi_device_resume,
95 .suspend = ata_scsi_device_suspend,
95}; 96};
96 97
97static struct ata_port_operations rz1000_port_ops = { 98static struct ata_port_operations rz1000_port_ops = {
@@ -129,6 +130,19 @@ static struct ata_port_operations rz1000_port_ops = {
129 .host_stop = ata_host_stop 130 .host_stop = ata_host_stop
130}; 131};
131 132
133static int rz1000_fifo_disable(struct pci_dev *pdev)
134{
135 u16 reg;
136 /* Be exceptionally paranoid as we must be sure to apply the fix */
137 if (pci_read_config_word(pdev, 0x40, &reg) != 0)
138 return -1;
139 reg &= 0xDFFF;
140 if (pci_write_config_word(pdev, 0x40, reg) != 0)
141 return -1;
142 printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
143 return 0;
144}
145
132/** 146/**
133 * rz1000_init_one - Register RZ1000 ATA PCI device with kernel services 147 * rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
134 * @pdev: PCI device to register 148 * @pdev: PCI device to register
@@ -143,7 +157,6 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
143{ 157{
144 static int printed_version; 158 static int printed_version;
145 struct ata_port_info *port_info[2]; 159 struct ata_port_info *port_info[2];
146 u16 reg;
147 static struct ata_port_info info = { 160 static struct ata_port_info info = {
148 .sht = &rz1000_sht, 161 .sht = &rz1000_sht,
149 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 162 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
@@ -154,23 +167,25 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
154 if (!printed_version++) 167 if (!printed_version++)
155 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); 168 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
156 169
157 /* Be exceptionally paranoid as we must be sure to apply the fix */ 170 if (rz1000_fifo_disable(pdev) == 0) {
158 if (pci_read_config_word(pdev, 0x40, &reg) != 0) 171 port_info[0] = &info;
159 goto fail; 172 port_info[1] = &info;
160 reg &= 0xDFFF; 173 return ata_pci_init_one(pdev, port_info, 2);
161 if (pci_write_config_word(pdev, 0x40, reg) != 0) 174 }
162 goto fail;
163 printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
164
165 port_info[0] = &info;
166 port_info[1] = &info;
167 return ata_pci_init_one(pdev, port_info, 2);
168fail:
169 printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n"); 175 printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n");
170 /* Not safe to use so skip */ 176 /* Not safe to use so skip */
171 return -ENODEV; 177 return -ENODEV;
172} 178}
173 179
180static int rz1000_reinit_one(struct pci_dev *pdev)
181{
 182	/* If this fails on resume (which is a "can't happen" case), we
183 must stop as any progress risks data loss */
184 if (rz1000_fifo_disable(pdev))
185 panic("rz1000 fifo");
186 return ata_pci_device_resume(pdev);
187}
188
174static const struct pci_device_id pata_rz1000[] = { 189static const struct pci_device_id pata_rz1000[] = {
175 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), }, 190 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
176 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), }, 191 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
@@ -182,7 +197,9 @@ static struct pci_driver rz1000_pci_driver = {
182 .name = DRV_NAME, 197 .name = DRV_NAME,
183 .id_table = pata_rz1000, 198 .id_table = pata_rz1000,
184 .probe = rz1000_init_one, 199 .probe = rz1000_init_one,
185 .remove = ata_pci_remove_one 200 .remove = ata_pci_remove_one,
201 .suspend = ata_pci_device_suspend,
202 .resume = rz1000_reinit_one,
186}; 203};
187 204
188static int __init rz1000_init(void) 205static int __init rz1000_init(void)
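
rz1000_fifo_disable() above factors the erratum workaround out of the probe path so that rz1000_reinit_one() can re-apply it on resume, panicking if the write fails because running with chipset read-ahead enabled risks data loss. A standalone sketch of just the config-word arithmetic; only the offset 0x40 and the 0xDFFF mask come from the patch, the harness is invented for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t reg = 0xFFFF;		/* pretend value read from PCI config word 0x40 */

	reg &= 0xDFFF;			/* clear bit 13: chipset read-ahead off */

	printf("config 0x40 after fix: 0x%04X (bit 13 %s)\n",
	       (unsigned)reg, (reg & 0x2000) ? "set" : "clear");
	return 0;
}
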
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 0c75dae74764..a3b35bc50394 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -40,7 +40,7 @@
40#include <linux/libata.h> 40#include <linux/libata.h>
41 41
42#define DRV_NAME "sc1200" 42#define DRV_NAME "sc1200"
43#define DRV_VERSION "0.2.3" 43#define DRV_VERSION "0.2.4"
44 44
45#define SC1200_REV_A 0x00 45#define SC1200_REV_A 0x00
46#define SC1200_REV_B1 0x01 46#define SC1200_REV_B1 0x01
@@ -186,7 +186,6 @@ static struct scsi_host_template sc1200_sht = {
186 .can_queue = ATA_DEF_QUEUE, 186 .can_queue = ATA_DEF_QUEUE,
187 .this_id = ATA_SHT_THIS_ID, 187 .this_id = ATA_SHT_THIS_ID,
188 .sg_tablesize = LIBATA_MAX_PRD, 188 .sg_tablesize = LIBATA_MAX_PRD,
189 .max_sectors = ATA_MAX_SECTORS,
190 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 189 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
191 .emulated = ATA_SHT_EMULATED, 190 .emulated = ATA_SHT_EMULATED,
192 .use_clustering = ATA_SHT_USE_CLUSTERING, 191 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -195,6 +194,8 @@ static struct scsi_host_template sc1200_sht = {
195 .slave_configure = ata_scsi_slave_config, 194 .slave_configure = ata_scsi_slave_config,
196 .slave_destroy = ata_scsi_slave_destroy, 195 .slave_destroy = ata_scsi_slave_destroy,
197 .bios_param = ata_std_bios_param, 196 .bios_param = ata_std_bios_param,
197 .resume = ata_scsi_device_resume,
198 .suspend = ata_scsi_device_suspend,
198}; 199};
199 200
200static struct ata_port_operations sc1200_port_ops = { 201static struct ata_port_operations sc1200_port_ops = {
@@ -264,7 +265,9 @@ static struct pci_driver sc1200_pci_driver = {
264 .name = DRV_NAME, 265 .name = DRV_NAME,
265 .id_table = sc1200, 266 .id_table = sc1200,
266 .probe = sc1200_init_one, 267 .probe = sc1200_init_one,
267 .remove = ata_pci_remove_one 268 .remove = ata_pci_remove_one,
269 .suspend = ata_pci_device_suspend,
270 .resume = ata_pci_device_resume,
268}; 271};
269 272
270static int __init sc1200_init(void) 273static int __init sc1200_init(void)
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index be7f60efcb61..f02b6a3b0f10 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -41,7 +41,7 @@
41#include <linux/libata.h> 41#include <linux/libata.h>
42 42
43#define DRV_NAME "pata_serverworks" 43#define DRV_NAME "pata_serverworks"
44#define DRV_VERSION "0.3.7" 44#define DRV_VERSION "0.3.9"
45 45
46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */ 46#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */ 47#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
@@ -318,7 +318,6 @@ static struct scsi_host_template serverworks_sht = {
318 .can_queue = ATA_DEF_QUEUE, 318 .can_queue = ATA_DEF_QUEUE,
319 .this_id = ATA_SHT_THIS_ID, 319 .this_id = ATA_SHT_THIS_ID,
320 .sg_tablesize = LIBATA_MAX_PRD, 320 .sg_tablesize = LIBATA_MAX_PRD,
321 .max_sectors = ATA_MAX_SECTORS,
322 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 321 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
323 .emulated = ATA_SHT_EMULATED, 322 .emulated = ATA_SHT_EMULATED,
324 .use_clustering = ATA_SHT_USE_CLUSTERING, 323 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -327,6 +326,8 @@ static struct scsi_host_template serverworks_sht = {
327 .slave_configure = ata_scsi_slave_config, 326 .slave_configure = ata_scsi_slave_config,
328 .slave_destroy = ata_scsi_slave_destroy, 327 .slave_destroy = ata_scsi_slave_destroy,
329 .bios_param = ata_std_bios_param, 328 .bios_param = ata_std_bios_param,
329 .resume = ata_scsi_device_resume,
330 .suspend = ata_scsi_device_suspend,
330}; 331};
331 332
332static struct ata_port_operations serverworks_osb4_port_ops = { 333static struct ata_port_operations serverworks_osb4_port_ops = {
@@ -554,6 +555,30 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
554 return ata_pci_init_one(pdev, port_info, ports); 555 return ata_pci_init_one(pdev, port_info, ports);
555} 556}
556 557
558static int serverworks_reinit_one(struct pci_dev *pdev)
559{
560 /* Force master latency timer to 64 PCI clocks */
561 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
562
563 switch (pdev->device)
564 {
565 case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
566 serverworks_fixup_osb4(pdev);
567 break;
568 case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
569 ata_pci_clear_simplex(pdev);
570 /* fall through */
571 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
572 case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
573 serverworks_fixup_csb(pdev);
574 break;
575 case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
576 serverworks_fixup_ht1000(pdev);
577 break;
578 }
579 return ata_pci_device_resume(pdev);
580}
581
557static const struct pci_device_id serverworks[] = { 582static const struct pci_device_id serverworks[] = {
558 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0}, 583 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
559 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2}, 584 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
@@ -568,7 +593,9 @@ static struct pci_driver serverworks_pci_driver = {
568 .name = DRV_NAME, 593 .name = DRV_NAME,
569 .id_table = serverworks, 594 .id_table = serverworks,
570 .probe = serverworks_init_one, 595 .probe = serverworks_init_one,
571 .remove = ata_pci_remove_one 596 .remove = ata_pci_remove_one,
597 .suspend = ata_pci_device_suspend,
598 .resume = serverworks_reinit_one,
572}; 599};
573 600
574static int __init serverworks_init(void) 601static int __init serverworks_init(void)
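
serverworks_reinit_one() above re-applies on resume the same per-chip fixups that probe uses, keyed on the PCI device ID, with the CSB5 case clearing simplex mode and then deliberately falling through to the shared CSB fixup. A standalone model of that dispatch; the chip names and stub functions are invented for the example.

#include <stdio.h>

enum sw_chip { OSB4, CSB5, CSB6, HT1000 };

static void fixup_osb4(void)    { puts("OSB4 fixup"); }
static void clear_simplex(void) { puts("clear simplex (CSB5 only)"); }
static void fixup_csb(void)     { puts("CSB fixup"); }
static void fixup_ht1000(void)  { puts("HT1000 fixup"); }

static void reinit(enum sw_chip chip)
{
	switch (chip) {
	case OSB4:
		fixup_osb4();
		break;
	case CSB5:
		clear_simplex();
		/* fall through to the common CSB fixup */
	case CSB6:
		fixup_csb();
		break;
	case HT1000:
		fixup_ht1000();
		break;
	}
}

int main(void)
{
	reinit(CSB5);	/* prints the simplex clear followed by the CSB fixup */
	return 0;
}
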
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 11942fd03b55..32cf0bfa8921 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -33,7 +33,7 @@
33#include <linux/libata.h> 33#include <linux/libata.h>
34 34
35#define DRV_NAME "pata_sil680" 35#define DRV_NAME "pata_sil680"
36#define DRV_VERSION "0.3.2" 36#define DRV_VERSION "0.4.1"
37 37
38/** 38/**
39 * sil680_selreg - return register base 39 * sil680_selreg - return register base
@@ -218,7 +218,6 @@ static struct scsi_host_template sil680_sht = {
218 .can_queue = ATA_DEF_QUEUE, 218 .can_queue = ATA_DEF_QUEUE,
219 .this_id = ATA_SHT_THIS_ID, 219 .this_id = ATA_SHT_THIS_ID,
220 .sg_tablesize = LIBATA_MAX_PRD, 220 .sg_tablesize = LIBATA_MAX_PRD,
221 .max_sectors = ATA_MAX_SECTORS,
222 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 221 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
223 .emulated = ATA_SHT_EMULATED, 222 .emulated = ATA_SHT_EMULATED,
224 .use_clustering = ATA_SHT_USE_CLUSTERING, 223 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -263,32 +262,20 @@ static struct ata_port_operations sil680_port_ops = {
263 .host_stop = ata_host_stop 262 .host_stop = ata_host_stop
264}; 263};
265 264
266static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 265/**
266 * sil680_init_chip - chip setup
267 * @pdev: PCI device
268 *
269 * Perform all the chip setup which must be done both when the device
 270 * is powered up on boot and on resume, in case we resumed from RAM.
271 * Returns the final clock settings.
272 */
273
274static u8 sil680_init_chip(struct pci_dev *pdev)
267{ 275{
268 static struct ata_port_info info = {
269 .sht = &sil680_sht,
270 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
271 .pio_mask = 0x1f,
272 .mwdma_mask = 0x07,
273 .udma_mask = 0x7f,
274 .port_ops = &sil680_port_ops
275 };
276 static struct ata_port_info info_slow = {
277 .sht = &sil680_sht,
278 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
279 .pio_mask = 0x1f,
280 .mwdma_mask = 0x07,
281 .udma_mask = 0x3f,
282 .port_ops = &sil680_port_ops
283 };
284 static struct ata_port_info *port_info[2] = {&info, &info};
285 static int printed_version;
286 u32 class_rev = 0; 276 u32 class_rev = 0;
287 u8 tmpbyte = 0; 277 u8 tmpbyte = 0;
288 278
289 if (!printed_version++)
290 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
291
292 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev); 279 pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
293 class_rev &= 0xff; 280 class_rev &= 0xff;
294 /* FIXME: double check */ 281 /* FIXME: double check */
@@ -323,8 +310,6 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
323 pci_read_config_byte(pdev, 0x8A, &tmpbyte); 310 pci_read_config_byte(pdev, 0x8A, &tmpbyte);
324 printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n", 311 printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
325 tmpbyte & 1, tmpbyte & 0x30); 312 tmpbyte & 1, tmpbyte & 0x30);
326 if ((tmpbyte & 0x30) == 0)
327 port_info[0] = port_info[1] = &info_slow;
328 313
329 pci_write_config_byte(pdev, 0xA1, 0x72); 314 pci_write_config_byte(pdev, 0xA1, 0x72);
330 pci_write_config_word(pdev, 0xA2, 0x328A); 315 pci_write_config_word(pdev, 0xA2, 0x328A);
@@ -343,11 +328,51 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
343 case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break; 328 case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
344 /* This last case is _NOT_ ok */ 329 /* This last case is _NOT_ ok */
345 case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n"); 330 case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n");
346 return -EIO; 331 }
332 return tmpbyte & 0x30;
333}
334
335static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
336{
337 static struct ata_port_info info = {
338 .sht = &sil680_sht,
339 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
340 .pio_mask = 0x1f,
341 .mwdma_mask = 0x07,
342 .udma_mask = 0x7f,
343 .port_ops = &sil680_port_ops
344 };
345 static struct ata_port_info info_slow = {
346 .sht = &sil680_sht,
347 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
348 .pio_mask = 0x1f,
349 .mwdma_mask = 0x07,
350 .udma_mask = 0x3f,
351 .port_ops = &sil680_port_ops
352 };
353 static struct ata_port_info *port_info[2] = {&info, &info};
354 static int printed_version;
355
356 if (!printed_version++)
357 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
358
359 switch(sil680_init_chip(pdev))
360 {
361 case 0:
362 port_info[0] = port_info[1] = &info_slow;
363 break;
364 case 0x30:
365 return -ENODEV;
347 } 366 }
348 return ata_pci_init_one(pdev, port_info, 2); 367 return ata_pci_init_one(pdev, port_info, 2);
349} 368}
350 369
370static int sil680_reinit_one(struct pci_dev *pdev)
371{
372 sil680_init_chip(pdev);
373 return ata_pci_device_resume(pdev);
374}
375
351static const struct pci_device_id sil680[] = { 376static const struct pci_device_id sil680[] = {
352 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), }, 377 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
353 378
@@ -358,7 +383,9 @@ static struct pci_driver sil680_pci_driver = {
358 .name = DRV_NAME, 383 .name = DRV_NAME,
359 .id_table = sil680, 384 .id_table = sil680,
360 .probe = sil680_init_one, 385 .probe = sil680_init_one,
361 .remove = ata_pci_remove_one 386 .remove = ata_pci_remove_one,
387 .suspend = ata_pci_device_suspend,
388 .resume = sil680_reinit_one,
362}; 389};
363 390
364static int __init sil680_init(void) 391static int __init sil680_init(void)
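
sil680_init_chip() above now returns the clock bits (bits 4-5 of register 0x8A) so that probe and resume share the same setup, and sil680_init_one() switches on that value: 0x00 selects the slower info_slow table and 0x30 aborts the probe. A standalone sketch of that selection; the helper and harness are invented, only the masks and cases come from the patch.

#include <stdio.h>

static int pick_udma_mask(unsigned int clock_bits, unsigned int *udma_mask)
{
	switch (clock_bits & 0x30) {
	case 0x00:			/* use the slower table (info_slow) */
		*udma_mask = 0x3f;
		return 0;
	case 0x30:			/* clock disabled: skip the device */
		return -1;
	default:			/* 133MHz or PCI clock */
		*udma_mask = 0x7f;
		return 0;
	}
}

int main(void)
{
	unsigned int mask;

	if (pick_udma_mask(0x00, &mask) == 0)
		printf("clock bits 0x00 -> udma_mask 0x%02x\n", mask);
	if (pick_udma_mask(0x30, &mask) != 0)
		printf("clock bits 0x30 -> device skipped\n");
	return 0;
}
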
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 91e85f90941d..916cedb3d755 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -34,7 +34,7 @@
34#include <linux/ata.h> 34#include <linux/ata.h>
35 35
36#define DRV_NAME "pata_sis" 36#define DRV_NAME "pata_sis"
37#define DRV_VERSION "0.4.4" 37#define DRV_VERSION "0.4.5"
38 38
39struct sis_chipset { 39struct sis_chipset {
40 u16 device; /* PCI host ID */ 40 u16 device; /* PCI host ID */
@@ -538,7 +538,6 @@ static struct scsi_host_template sis_sht = {
538 .can_queue = ATA_DEF_QUEUE, 538 .can_queue = ATA_DEF_QUEUE,
539 .this_id = ATA_SHT_THIS_ID, 539 .this_id = ATA_SHT_THIS_ID,
540 .sg_tablesize = LIBATA_MAX_PRD, 540 .sg_tablesize = LIBATA_MAX_PRD,
541 .max_sectors = ATA_MAX_SECTORS,
542 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 541 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
543 .emulated = ATA_SHT_EMULATED, 542 .emulated = ATA_SHT_EMULATED,
544 .use_clustering = ATA_SHT_USE_CLUSTERING, 543 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -547,6 +546,8 @@ static struct scsi_host_template sis_sht = {
547 .slave_configure = ata_scsi_slave_config, 546 .slave_configure = ata_scsi_slave_config,
548 .slave_destroy = ata_scsi_slave_destroy, 547 .slave_destroy = ata_scsi_slave_destroy,
549 .bios_param = ata_std_bios_param, 548 .bios_param = ata_std_bios_param,
549 .resume = ata_scsi_device_resume,
550 .suspend = ata_scsi_device_suspend,
550}; 551};
551 552
552static const struct ata_port_operations sis_133_ops = { 553static const struct ata_port_operations sis_133_ops = {
@@ -1000,6 +1001,8 @@ static struct pci_driver sis_pci_driver = {
1000 .id_table = sis_pci_tbl, 1001 .id_table = sis_pci_tbl,
1001 .probe = sis_init_one, 1002 .probe = sis_init_one,
1002 .remove = ata_pci_remove_one, 1003 .remove = ata_pci_remove_one,
1004 .suspend = ata_pci_device_suspend,
1005 .resume = ata_pci_device_resume,
1003}; 1006};
1004 1007
1005static int __init sis_init(void) 1008static int __init sis_init(void)
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index dc1cfc6d805b..e94f515ef54b 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -230,7 +230,6 @@ static struct scsi_host_template sl82c105_sht = {
230 .can_queue = ATA_DEF_QUEUE, 230 .can_queue = ATA_DEF_QUEUE,
231 .this_id = ATA_SHT_THIS_ID, 231 .this_id = ATA_SHT_THIS_ID,
232 .sg_tablesize = LIBATA_MAX_PRD, 232 .sg_tablesize = LIBATA_MAX_PRD,
233 .max_sectors = ATA_MAX_SECTORS,
234 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 233 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
235 .emulated = ATA_SHT_EMULATED, 234 .emulated = ATA_SHT_EMULATED,
236 .use_clustering = ATA_SHT_USE_CLUSTERING, 235 .use_clustering = ATA_SHT_USE_CLUSTERING,
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index bfda1f7e760a..a142971f1307 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -43,7 +43,7 @@
43#include <linux/libata.h> 43#include <linux/libata.h>
44 44
45#define DRV_NAME "pata_triflex" 45#define DRV_NAME "pata_triflex"
46#define DRV_VERSION "0.2.5" 46#define DRV_VERSION "0.2.7"
47 47
48/** 48/**
49 * triflex_prereset - probe begin 49 * triflex_prereset - probe begin
@@ -185,7 +185,6 @@ static struct scsi_host_template triflex_sht = {
185 .can_queue = ATA_DEF_QUEUE, 185 .can_queue = ATA_DEF_QUEUE,
186 .this_id = ATA_SHT_THIS_ID, 186 .this_id = ATA_SHT_THIS_ID,
187 .sg_tablesize = LIBATA_MAX_PRD, 187 .sg_tablesize = LIBATA_MAX_PRD,
188 .max_sectors = ATA_MAX_SECTORS,
189 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 188 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
190 .emulated = ATA_SHT_EMULATED, 189 .emulated = ATA_SHT_EMULATED,
191 .use_clustering = ATA_SHT_USE_CLUSTERING, 190 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -194,6 +193,8 @@ static struct scsi_host_template triflex_sht = {
194 .slave_configure = ata_scsi_slave_config, 193 .slave_configure = ata_scsi_slave_config,
195 .slave_destroy = ata_scsi_slave_destroy, 194 .slave_destroy = ata_scsi_slave_destroy,
196 .bios_param = ata_std_bios_param, 195 .bios_param = ata_std_bios_param,
196 .resume = ata_scsi_device_resume,
197 .suspend = ata_scsi_device_suspend,
197}; 198};
198 199
199static struct ata_port_operations triflex_port_ops = { 200static struct ata_port_operations triflex_port_ops = {
@@ -258,7 +259,9 @@ static struct pci_driver triflex_pci_driver = {
258 .name = DRV_NAME, 259 .name = DRV_NAME,
259 .id_table = triflex, 260 .id_table = triflex,
260 .probe = triflex_init_one, 261 .probe = triflex_init_one,
261 .remove = ata_pci_remove_one 262 .remove = ata_pci_remove_one,
263 .suspend = ata_pci_device_suspend,
264 .resume = ata_pci_device_resume,
262}; 265};
263 266
264static int __init triflex_init(void) 267static int __init triflex_init(void)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index c5f1616d224d..cc09d47fb927 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -23,6 +23,7 @@
23 * VIA VT8233c - UDMA100 23 * VIA VT8233c - UDMA100
24 * VIA VT8235 - UDMA133 24 * VIA VT8235 - UDMA133
25 * VIA VT8237 - UDMA133 25 * VIA VT8237 - UDMA133
26 * VIA VT8251 - UDMA133
26 * 27 *
27 * Most registers remain compatible across chips. Others start reserved 28 * Most registers remain compatible across chips. Others start reserved
28 * and acquire sensible semantics if set to 1 (eg cable detect). A few 29 * and acquire sensible semantics if set to 1 (eg cable detect). A few
@@ -60,7 +61,7 @@
60#include <linux/libata.h> 61#include <linux/libata.h>
61 62
62#define DRV_NAME "pata_via" 63#define DRV_NAME "pata_via"
63#define DRV_VERSION "0.1.14" 64#define DRV_VERSION "0.2.0"
64 65
65/* 66/*
66 * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx 67 * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
@@ -94,6 +95,7 @@ static const struct via_isa_bridge {
94 u8 rev_max; 95 u8 rev_max;
95 u16 flags; 96 u16 flags;
96} via_isa_bridges[] = { 97} via_isa_bridges[] = {
98 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
97 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 99 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
98 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES}, 100 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
99 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 101 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
@@ -288,7 +290,6 @@ static struct scsi_host_template via_sht = {
288 .can_queue = ATA_DEF_QUEUE, 290 .can_queue = ATA_DEF_QUEUE,
289 .this_id = ATA_SHT_THIS_ID, 291 .this_id = ATA_SHT_THIS_ID,
290 .sg_tablesize = LIBATA_MAX_PRD, 292 .sg_tablesize = LIBATA_MAX_PRD,
291 .max_sectors = ATA_MAX_SECTORS,
292 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 293 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
293 .emulated = ATA_SHT_EMULATED, 294 .emulated = ATA_SHT_EMULATED,
294 .use_clustering = ATA_SHT_USE_CLUSTERING, 295 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -297,6 +298,8 @@ static struct scsi_host_template via_sht = {
297 .slave_configure = ata_scsi_slave_config, 298 .slave_configure = ata_scsi_slave_config,
298 .slave_destroy = ata_scsi_slave_destroy, 299 .slave_destroy = ata_scsi_slave_destroy,
299 .bios_param = ata_std_bios_param, 300 .bios_param = ata_std_bios_param,
301 .resume = ata_scsi_device_resume,
302 .suspend = ata_scsi_device_suspend,
300}; 303};
301 304
302static struct ata_port_operations via_port_ops = { 305static struct ata_port_operations via_port_ops = {
@@ -370,8 +373,42 @@ static struct ata_port_operations via_port_ops_noirq = {
370}; 373};
371 374
372/** 375/**
376 * via_config_fifo - set up the FIFO
377 * @pdev: PCI device
378 * @flags: configuration flags
379 *
 380 * Set the FIFO properties for this device if necessary. Used both on
 381 * set up and on the resume path.
382 */
383
384static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
385{
386 u8 enable;
387
388 /* 0x40 low bits indicate enabled channels */
389 pci_read_config_byte(pdev, 0x40 , &enable);
390 enable &= 3;
391
392 if (flags & VIA_SET_FIFO) {
393 u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
394 u8 fifo;
395
396 pci_read_config_byte(pdev, 0x43, &fifo);
397
398 /* Clear PREQ# until DDACK# for errata */
399 if (flags & VIA_BAD_PREQ)
400 fifo &= 0x7F;
401 else
402 fifo &= 0x9f;
403 /* Turn on FIFO for enabled channels */
404 fifo |= fifo_setting[enable];
405 pci_write_config_byte(pdev, 0x43, fifo);
406 }
407}
408
409/**
373 * via_init_one - discovery callback 410 * via_init_one - discovery callback
374 * @pdev: PCI device ID 411 * @pdev: PCI device
375 * @id: PCI table info 412 * @id: PCI table info
376 * 413 *
377 * A VIA IDE interface has been discovered. Figure out what revision 414 * A VIA IDE interface has been discovered. Figure out what revision
@@ -383,7 +420,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
383 /* Early VIA without UDMA support */ 420 /* Early VIA without UDMA support */
384 static struct ata_port_info via_mwdma_info = { 421 static struct ata_port_info via_mwdma_info = {
385 .sht = &via_sht, 422 .sht = &via_sht,
386 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 423 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
387 .pio_mask = 0x1f, 424 .pio_mask = 0x1f,
388 .mwdma_mask = 0x07, 425 .mwdma_mask = 0x07,
389 .port_ops = &via_port_ops 426 .port_ops = &via_port_ops
@@ -391,7 +428,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
391 /* Ditto with IRQ masking required */ 428 /* Ditto with IRQ masking required */
392 static struct ata_port_info via_mwdma_info_borked = { 429 static struct ata_port_info via_mwdma_info_borked = {
393 .sht = &via_sht, 430 .sht = &via_sht,
394 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 431 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
395 .pio_mask = 0x1f, 432 .pio_mask = 0x1f,
396 .mwdma_mask = 0x07, 433 .mwdma_mask = 0x07,
397 .port_ops = &via_port_ops_noirq, 434 .port_ops = &via_port_ops_noirq,
@@ -399,7 +436,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
399 /* VIA UDMA 33 devices (and borked 66) */ 436 /* VIA UDMA 33 devices (and borked 66) */
400 static struct ata_port_info via_udma33_info = { 437 static struct ata_port_info via_udma33_info = {
401 .sht = &via_sht, 438 .sht = &via_sht,
402 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 439 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
403 .pio_mask = 0x1f, 440 .pio_mask = 0x1f,
404 .mwdma_mask = 0x07, 441 .mwdma_mask = 0x07,
405 .udma_mask = 0x7, 442 .udma_mask = 0x7,
@@ -408,7 +445,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
408 /* VIA UDMA 66 devices */ 445 /* VIA UDMA 66 devices */
409 static struct ata_port_info via_udma66_info = { 446 static struct ata_port_info via_udma66_info = {
410 .sht = &via_sht, 447 .sht = &via_sht,
411 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 448 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
412 .pio_mask = 0x1f, 449 .pio_mask = 0x1f,
413 .mwdma_mask = 0x07, 450 .mwdma_mask = 0x07,
414 .udma_mask = 0x1f, 451 .udma_mask = 0x1f,
@@ -417,7 +454,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
417 /* VIA UDMA 100 devices */ 454 /* VIA UDMA 100 devices */
418 static struct ata_port_info via_udma100_info = { 455 static struct ata_port_info via_udma100_info = {
419 .sht = &via_sht, 456 .sht = &via_sht,
420 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 457 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
421 .pio_mask = 0x1f, 458 .pio_mask = 0x1f,
422 .mwdma_mask = 0x07, 459 .mwdma_mask = 0x07,
423 .udma_mask = 0x3f, 460 .udma_mask = 0x3f,
@@ -426,7 +463,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
426 /* UDMA133 with bad AST (All current 133) */ 463 /* UDMA133 with bad AST (All current 133) */
427 static struct ata_port_info via_udma133_info = { 464 static struct ata_port_info via_udma133_info = {
428 .sht = &via_sht, 465 .sht = &via_sht,
429 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 466 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
430 .pio_mask = 0x1f, 467 .pio_mask = 0x1f,
431 .mwdma_mask = 0x07, 468 .mwdma_mask = 0x07,
432 .udma_mask = 0x7f, /* FIXME: should check north bridge */ 469 .udma_mask = 0x7f, /* FIXME: should check north bridge */
@@ -471,21 +508,8 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
471 } 508 }
472 509
473 /* Initialise the FIFO for the enabled channels. */ 510 /* Initialise the FIFO for the enabled channels. */
474 if (config->flags & VIA_SET_FIFO) { 511 via_config_fifo(pdev, config->flags);
475 u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20}; 512
476 u8 fifo;
477
478 pci_read_config_byte(pdev, 0x43, &fifo);
479
480 /* Clear PREQ# until DDACK# for errata */
481 if (config->flags & VIA_BAD_PREQ)
482 fifo &= 0x7F;
483 else
484 fifo &= 0x9f;
485 /* Turn on FIFO for enabled channels */
486 fifo |= fifo_setting[enable];
487 pci_write_config_byte(pdev, 0x43, fifo);
488 }
489 /* Clock set up */ 513 /* Clock set up */
490 switch(config->flags & VIA_UDMA) { 514 switch(config->flags & VIA_UDMA) {
491 case VIA_UDMA_NONE: 515 case VIA_UDMA_NONE:
@@ -529,6 +553,39 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
529 return ata_pci_init_one(pdev, port_info, 2); 553 return ata_pci_init_one(pdev, port_info, 2);
530} 554}
531 555
556/**
557 * via_reinit_one - reinit after resume
 558 * @pdev: PCI device
 559 *
 560 * Called when the VIA PATA device is resumed. We must then
 561 * reconfigure the FIFO and any other setup we may have altered. In
 562 * addition the kernel needs the PCI quirk resume methods to be
 563 * supported.
564 */
565
566static int via_reinit_one(struct pci_dev *pdev)
567{
568 u32 timing;
569 struct ata_host *host = dev_get_drvdata(&pdev->dev);
570 const struct via_isa_bridge *config = host->private_data;
571
572 via_config_fifo(pdev, config->flags);
573
574 if ((config->flags & VIA_UDMA) == VIA_UDMA_66) {
575 /* The 66 MHz devices require we enable the clock */
576 pci_read_config_dword(pdev, 0x50, &timing);
577 timing |= 0x80008;
578 pci_write_config_dword(pdev, 0x50, timing);
579 }
580 if (config->flags & VIA_BAD_CLK66) {
581 /* Disable the 66MHz clock on problem devices */
582 pci_read_config_dword(pdev, 0x50, &timing);
583 timing &= ~0x80008;
584 pci_write_config_dword(pdev, 0x50, timing);
585 }
586 return ata_pci_device_resume(pdev);
587}
588
532static const struct pci_device_id via[] = { 589static const struct pci_device_id via[] = {
533 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), }, 590 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), },
534 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), }, 591 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), },
@@ -542,7 +599,9 @@ static struct pci_driver via_pci_driver = {
542 .name = DRV_NAME, 599 .name = DRV_NAME,
543 .id_table = via, 600 .id_table = via,
544 .probe = via_init_one, 601 .probe = via_init_one,
545 .remove = ata_pci_remove_one 602 .remove = ata_pci_remove_one,
603 .suspend = ata_pci_device_suspend,
604 .resume = via_reinit_one,
546}; 605};
547 606
548static int __init via_init(void) 607static int __init via_init(void)
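
via_config_fifo() above pulls the FIFO programming out of via_init_one() so via_reinit_one() can redo it after a suspend, and the resume path also re-enables (or, for VIA_BAD_CLK66 parts, re-disables) the 66MHz clock bits in register 0x50. A standalone sketch of the FIFO byte arithmetic; the EX_* flags stand in for the driver's VIA_SET_FIFO/VIA_BAD_PREQ flags and the harness is invented.

#include <stdio.h>
#include <stdint.h>

#define EX_SET_FIFO	0x01	/* stand-in for VIA_SET_FIFO */
#define EX_BAD_PREQ	0x02	/* stand-in for VIA_BAD_PREQ */

static uint8_t via_fifo_byte(uint8_t old_fifo, uint8_t enable, unsigned int flags)
{
	/* per-channel FIFO allocation, indexed by the enable bits of reg 0x40 */
	static const uint8_t fifo_setting[4] = { 0x00, 0x60, 0x00, 0x20 };
	uint8_t fifo = old_fifo;

	if (!(flags & EX_SET_FIFO))
		return fifo;

	if (flags & EX_BAD_PREQ)	/* PREQ#/DDACK# erratum: clear bit 7 as well */
		fifo &= 0x7F;
	else
		fifo &= 0x9F;

	return fifo | fifo_setting[enable & 3];
}

int main(void)
{
	printf("both channels enabled      : 0x%02X\n",
	       (unsigned)via_fifo_byte(0xFF, 0x03, EX_SET_FIFO));
	printf("primary only, PREQ erratum : 0x%02X\n",
	       (unsigned)via_fifo_byte(0xFF, 0x01, EX_SET_FIFO | EX_BAD_PREQ));
	return 0;
}
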
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
new file mode 100644
index 000000000000..3ea345cde52e
--- /dev/null
+++ b/drivers/ata/pata_winbond.c
@@ -0,0 +1,306 @@
1/*
2 * pata_winbond.c - Winbond VLB ATA controllers
3 * (C) 2006 Red Hat <alan@redhat.com>
4 *
5 * Support for the Winbond 83759A when operating in advanced mode.
6 * Multichip mode is not currently supported.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/init.h>
13#include <linux/blkdev.h>
14#include <linux/delay.h>
15#include <scsi/scsi_host.h>
16#include <linux/libata.h>
17#include <linux/platform_device.h>
18
19#define DRV_NAME "pata_winbond"
20#define DRV_VERSION "0.0.1"
21
22#define NR_HOST 4 /* Two winbond controllers, two channels each */
23
24struct winbond_data {
25 unsigned long config;
26 struct platform_device *platform_dev;
27};
28
29static struct ata_host *winbond_host[NR_HOST];
30static struct winbond_data winbond_data[NR_HOST];
31static int nr_winbond_host;
32
33#ifdef MODULE
34static int probe_winbond = 1;
35#else
36static int probe_winbond;
37#endif
38
39static spinlock_t winbond_lock = SPIN_LOCK_UNLOCKED;
40
41static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
42{
43 unsigned long flags;
44 spin_lock_irqsave(&winbond_lock, flags);
45 outb(reg, port + 0x01);
46 outb(val, port + 0x02);
47 spin_unlock_irqrestore(&winbond_lock, flags);
48}
49
50static u8 winbond_readcfg(unsigned long port, u8 reg)
51{
52 u8 val;
53
54 unsigned long flags;
55 spin_lock_irqsave(&winbond_lock, flags);
56 outb(reg, port + 0x01);
57 val = inb(port + 0x02);
58 spin_unlock_irqrestore(&winbond_lock, flags);
59
60 return val;
61}
62
63static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
64{
65 struct ata_timing t;
66 struct winbond_data *winbond = ap->host->private_data;
67 int active, recovery;
68 u8 reg;
69 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
70
71 reg = winbond_readcfg(winbond->config, 0x81);
72
73 /* Get the timing data in cycles */
74 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
75 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
76 else
77 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
78
79 active = (FIT(t.active, 3, 17) - 1) & 0x0F;
80 recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
81 timing = (active << 4) | recovery;
82 winbond_writecfg(winbond->config, timing, reg);
83
84 /* Load the setup timing */
85
86 reg = 0x35;
87 if (adev->class != ATA_DEV_ATA)
88 reg |= 0x08; /* FIFO off */
89 if (!ata_pio_need_iordy(adev))
90 reg |= 0x02; /* IORDY off */
91 reg |= (FIT(t.setup, 0, 3) << 6);
92 winbond_writecfg(winbond->config, timing + 1, reg);
93}
94
95
96static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
97{
98 struct ata_port *ap = adev->ap;
99 int slop = buflen & 3;
100
101 if (ata_id_has_dword_io(adev->id)) {
102 if (write_data)
103 outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
104 else
105 insl(ap->ioaddr.data_addr, buf, buflen >> 2);
106
107 if (unlikely(slop)) {
108 u32 pad;
109 if (write_data) {
110 memcpy(&pad, buf + buflen - slop, slop);
111 outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
112 } else {
 113				pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
114 memcpy(buf + buflen - slop, &pad, slop);
115 }
116 }
117 } else
118 ata_pio_data_xfer(adev, buf, buflen, write_data);
119}
120
121static struct scsi_host_template winbond_sht = {
122 .module = THIS_MODULE,
123 .name = DRV_NAME,
124 .ioctl = ata_scsi_ioctl,
125 .queuecommand = ata_scsi_queuecmd,
126 .can_queue = ATA_DEF_QUEUE,
127 .this_id = ATA_SHT_THIS_ID,
128 .sg_tablesize = LIBATA_MAX_PRD,
129 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
130 .emulated = ATA_SHT_EMULATED,
131 .use_clustering = ATA_SHT_USE_CLUSTERING,
132 .proc_name = DRV_NAME,
133 .dma_boundary = ATA_DMA_BOUNDARY,
134 .slave_configure = ata_scsi_slave_config,
135 .slave_destroy = ata_scsi_slave_destroy,
136 .bios_param = ata_std_bios_param,
137};
138
139static struct ata_port_operations winbond_port_ops = {
140 .port_disable = ata_port_disable,
141 .set_piomode = winbond_set_piomode,
142
143 .tf_load = ata_tf_load,
144 .tf_read = ata_tf_read,
145 .check_status = ata_check_status,
146 .exec_command = ata_exec_command,
147 .dev_select = ata_std_dev_select,
148
149 .freeze = ata_bmdma_freeze,
150 .thaw = ata_bmdma_thaw,
151 .error_handler = ata_bmdma_error_handler,
152 .post_internal_cmd = ata_bmdma_post_internal_cmd,
153
154 .qc_prep = ata_qc_prep,
155 .qc_issue = ata_qc_issue_prot,
156
157 .data_xfer = winbond_data_xfer,
158
159 .irq_handler = ata_interrupt,
160 .irq_clear = ata_bmdma_irq_clear,
161
162 .port_start = ata_port_start,
163 .port_stop = ata_port_stop,
164 .host_stop = ata_host_stop
165};
166
167/**
168 * winbond_init_one - attach a winbond interface
 169 *	@port: base address of the chip configuration I/O ports
 170 *
 171 *	Register a VLB bus IDE interface. Such interfaces are PIO-only and
 172 *	we assume they do not support IRQ sharing.
176 */
177
178static __init int winbond_init_one(unsigned long port)
179{
180 struct ata_probe_ent ae;
181 struct platform_device *pdev;
182 int ret;
183 u8 reg;
184 int i;
185
186 reg = winbond_readcfg(port, 0x81);
187 reg |= 0x80; /* jumpered mode off */
188 winbond_writecfg(port, 0x81, reg);
189 reg = winbond_readcfg(port, 0x83);
190 reg |= 0xF0; /* local control */
191 winbond_writecfg(port, 0x83, reg);
192 reg = winbond_readcfg(port, 0x85);
193 reg |= 0xF0; /* programmable timing */
194 winbond_writecfg(port, 0x85, reg);
195
196 reg = winbond_readcfg(port, 0x81);
197
198 if (!(reg & 0x03)) /* Disabled */
199 return 0;
200
201 for (i = 0; i < 2 ; i ++) {
202
203 if (reg & (1 << i)) {
204 /*
205 * Fill in a probe structure first of all
206 */
207
208 pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
209 if (pdev == NULL)
210 return -ENOMEM;
211
212 memset(&ae, 0, sizeof(struct ata_probe_ent));
213 INIT_LIST_HEAD(&ae.node);
214 ae.dev = &pdev->dev;
215
216 ae.port_ops = &winbond_port_ops;
217 ae.pio_mask = 0x1F;
218
219 ae.sht = &winbond_sht;
220
221 ae.n_ports = 1;
222 ae.irq = 14 + i;
223 ae.irq_flags = 0;
224 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
225 ae.port[0].cmd_addr = 0x1F0 - (0x80 * i);
226 ae.port[0].altstatus_addr = ae.port[0].cmd_addr + 0x0206;
227 ae.port[0].ctl_addr = ae.port[0].altstatus_addr;
228 ata_std_ports(&ae.port[0]);
229 /*
230 * Hook in a private data structure per channel
231 */
232 ae.private_data = &winbond_data[nr_winbond_host];
233 winbond_data[nr_winbond_host].config = port;
234 winbond_data[nr_winbond_host].platform_dev = pdev;
235
236 ret = ata_device_add(&ae);
237 if (ret == 0) {
238 platform_device_unregister(pdev);
239 return -ENODEV;
240 }
241 winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
242 }
243 }
244
245 return 0;
246}
247
248/**
249 * winbond_init - attach winbond interfaces
250 *
251 * Attach winbond IDE interfaces by scanning the ports it may occupy.
252 */
253
254static __init int winbond_init(void)
255{
256 static const unsigned long config[2] = { 0x130, 0x1B0 };
257
258 int ct = 0;
259 int i;
260
261 if (probe_winbond == 0)
262 return -ENODEV;
263
264 /*
265 * Check both base addresses
266 */
267
268 for (i = 0; i < 2; i++) {
269 if (probe_winbond & (1<<i)) {
270 int ret = 0;
271 unsigned long port = config[i];
272
273 if (request_region(port, 2, "pata_winbond")) {
274 ret = winbond_init_one(port);
275 if(ret <= 0)
276 release_region(port, 2);
277 else ct+= ret;
278 }
279 }
280 }
281 if (ct != 0)
282 return 0;
283 return -ENODEV;
284}
285
286static __exit void winbond_exit(void)
287{
288 int i;
289
290 for (i = 0; i < nr_winbond_host; i++) {
291 ata_host_remove(winbond_host[i]);
292 release_region(winbond_data[i].config, 2);
293 platform_device_unregister(winbond_data[i].platform_dev);
294 }
295}
296
297MODULE_AUTHOR("Alan Cox");
298MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
299MODULE_LICENSE("GPL");
300MODULE_VERSION(DRV_VERSION);
301
302module_init(winbond_init);
303module_exit(winbond_exit);
304
305module_param(probe_winbond, int, 0);
306
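
The new pata_winbond driver programs PIO timings by packing clamped active and recovery clock counts into one nibble each of a timing byte (winbond_set_piomode() above). A standalone sketch of that packing; clamp_int() stands in for the kernel's FIT() macro and the cycle counts in main() are made up for the example.

#include <stdio.h>

static int clamp_int(int v, int lo, int hi)	/* stand-in for FIT() */
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

static unsigned char winbond_timing_byte(int active_clocks, int recover_clocks)
{
	int active   = (clamp_int(active_clocks, 3, 17) - 1) & 0x0F;
	int recovery = (clamp_int(recover_clocks, 1, 15) + 1) & 0x0F;

	return (unsigned char)((active << 4) | recovery);
}

int main(void)
{
	/* e.g. 6 active clocks and 4 recovery clocks */
	printf("timing byte: 0x%02X\n", (unsigned)winbond_timing_byte(6, 4));
	return 0;
}
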
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index d65ebfd7c7b2..0d316eb3c214 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -29,6 +29,11 @@
29 * NV-specific details such as register offsets, SATA phy location, 29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc. 30 * hotplug info, etc.
31 * 31 *
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
32 */ 37 */
33 38
34#include <linux/kernel.h> 39#include <linux/kernel.h>
@@ -40,10 +45,13 @@
40#include <linux/interrupt.h> 45#include <linux/interrupt.h>
41#include <linux/device.h> 46#include <linux/device.h>
42#include <scsi/scsi_host.h> 47#include <scsi/scsi_host.h>
48#include <scsi/scsi_device.h>
43#include <linux/libata.h> 49#include <linux/libata.h>
44 50
45#define DRV_NAME "sata_nv" 51#define DRV_NAME "sata_nv"
46#define DRV_VERSION "2.0" 52#define DRV_VERSION "3.2"
53
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
47 55
48enum { 56enum {
49 NV_PORTS = 2, 57 NV_PORTS = 2,
@@ -78,8 +86,138 @@ enum {
78 // For PCI config register 20 86 // For PCI config register 20
79 NV_MCP_SATA_CFG_20 = 0x50, 87 NV_MCP_SATA_CFG_20 = 0x50,
80 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04, 88 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
89 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
90 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
91 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
92 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
93
94 NV_ADMA_MAX_CPBS = 32,
95 NV_ADMA_CPB_SZ = 128,
96 NV_ADMA_APRD_SZ = 16,
97 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
98 NV_ADMA_APRD_SZ,
99 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
100 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
101 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
102 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
103
104 /* BAR5 offset to ADMA general registers */
105 NV_ADMA_GEN = 0x400,
106 NV_ADMA_GEN_CTL = 0x00,
107 NV_ADMA_NOTIFIER_CLEAR = 0x30,
108
109 /* BAR5 offset to ADMA ports */
110 NV_ADMA_PORT = 0x480,
111
112 /* size of ADMA port register space */
113 NV_ADMA_PORT_SIZE = 0x100,
114
115 /* ADMA port registers */
116 NV_ADMA_CTL = 0x40,
117 NV_ADMA_CPB_COUNT = 0x42,
118 NV_ADMA_NEXT_CPB_IDX = 0x43,
119 NV_ADMA_STAT = 0x44,
120 NV_ADMA_CPB_BASE_LOW = 0x48,
121 NV_ADMA_CPB_BASE_HIGH = 0x4C,
122 NV_ADMA_APPEND = 0x50,
123 NV_ADMA_NOTIFIER = 0x68,
124 NV_ADMA_NOTIFIER_ERROR = 0x6C,
125
126 /* NV_ADMA_CTL register bits */
127 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
128 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
129 NV_ADMA_CTL_GO = (1 << 7),
130 NV_ADMA_CTL_AIEN = (1 << 8),
131 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
132 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
133
134 /* CPB response flag bits */
135 NV_CPB_RESP_DONE = (1 << 0),
136 NV_CPB_RESP_ATA_ERR = (1 << 3),
137 NV_CPB_RESP_CMD_ERR = (1 << 4),
138 NV_CPB_RESP_CPB_ERR = (1 << 7),
139
140 /* CPB control flag bits */
141 NV_CPB_CTL_CPB_VALID = (1 << 0),
142 NV_CPB_CTL_QUEUE = (1 << 1),
143 NV_CPB_CTL_APRD_VALID = (1 << 2),
144 NV_CPB_CTL_IEN = (1 << 3),
145 NV_CPB_CTL_FPDMA = (1 << 4),
146
147 /* APRD flags */
148 NV_APRD_WRITE = (1 << 1),
149 NV_APRD_END = (1 << 2),
150 NV_APRD_CONT = (1 << 3),
151
152 /* NV_ADMA_STAT flags */
153 NV_ADMA_STAT_TIMEOUT = (1 << 0),
154 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
155 NV_ADMA_STAT_HOTPLUG = (1 << 2),
156 NV_ADMA_STAT_CPBERR = (1 << 4),
157 NV_ADMA_STAT_SERROR = (1 << 5),
158 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
159 NV_ADMA_STAT_IDLE = (1 << 8),
160 NV_ADMA_STAT_LEGACY = (1 << 9),
161 NV_ADMA_STAT_STOPPED = (1 << 10),
162 NV_ADMA_STAT_DONE = (1 << 12),
163 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
164 NV_ADMA_STAT_TIMEOUT,
165
166 /* port flags */
167 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
168 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
169
170};
171
172/* ADMA Physical Region Descriptor - one SG segment */
173struct nv_adma_prd {
174 __le64 addr;
175 __le32 len;
176 u8 flags;
177 u8 packet_len;
178 __le16 reserved;
179};
180
181enum nv_adma_regbits {
182 CMDEND = (1 << 15), /* end of command list */
183 WNB = (1 << 14), /* wait-not-BSY */
184 IGN = (1 << 13), /* ignore this entry */
185 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
186 DA2 = (1 << (2 + 8)),
187 DA1 = (1 << (1 + 8)),
188 DA0 = (1 << (0 + 8)),
189};
190
191/* ADMA Command Parameter Block
192 The first 5 SG segments are stored inside the Command Parameter Block itself.
193 If there are more than 5 segments the remainder are stored in a separate
194 memory area indicated by next_aprd. */
195struct nv_adma_cpb {
196 u8 resp_flags; /* 0 */
197 u8 reserved1; /* 1 */
198 u8 ctl_flags; /* 2 */
199 /* len is length of taskfile in 64 bit words */
200 u8 len; /* 3 */
201 u8 tag; /* 4 */
202 u8 next_cpb_idx; /* 5 */
203 __le16 reserved2; /* 6-7 */
204 __le16 tf[12]; /* 8-31 */
205 struct nv_adma_prd aprd[5]; /* 32-111 */
206 __le64 next_aprd; /* 112-119 */
207 __le64 reserved3; /* 120-127 */
208};
209
210
211struct nv_adma_port_priv {
212 struct nv_adma_cpb *cpb;
213 dma_addr_t cpb_dma;
214 struct nv_adma_prd *aprd;
215 dma_addr_t aprd_dma;
216 u8 flags;
81}; 217};
82 218
219#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
220
83static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 221static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
84static void nv_ck804_host_stop(struct ata_host *host); 222static void nv_ck804_host_stop(struct ata_host *host);
85static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 223static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
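
The ADMA constants added above fix the layout of the per-port command area: each of the 32 CPBs is 128 bytes and carries 5 scatter/gather entries (APRDs) inline, with a further (1024 - 128)/16 = 56 external 16-byte APRDs allocated per command, giving the sg_tablesize of 61 and a 32KB per-port DMA area. A standalone check that just reproduces the arithmetic implied by those enums; none of this is driver code.

#include <stdio.h>

int main(void)
{
	const int max_cpbs    = 32;			/* NV_ADMA_MAX_CPBS */
	const int cpb_sz      = 128;			/* NV_ADMA_CPB_SZ */
	const int aprd_sz     = 16;			/* NV_ADMA_APRD_SZ */
	const int sgtbl_len   = (1024 - cpb_sz) / aprd_sz;
	const int sgtbl_total = sgtbl_len + 5;		/* 5 APRDs embedded in the CPB */
	const int priv_sz     = max_cpbs * (cpb_sz + sgtbl_len * aprd_sz);

	printf("external APRDs per CPB : %d\n", sgtbl_len);	/* 56 */
	printf("sg_tablesize           : %d\n", sgtbl_total);	/* 61 */
	printf("per-port DMA area      : %d bytes\n", priv_sz);	/* 32768 */
	return 0;
}
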
@@ -93,13 +231,28 @@ static void nv_nf2_thaw(struct ata_port *ap);
93static void nv_ck804_freeze(struct ata_port *ap); 231static void nv_ck804_freeze(struct ata_port *ap);
94static void nv_ck804_thaw(struct ata_port *ap); 232static void nv_ck804_thaw(struct ata_port *ap);
95static void nv_error_handler(struct ata_port *ap); 233static void nv_error_handler(struct ata_port *ap);
234static int nv_adma_slave_config(struct scsi_device *sdev);
235static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
236static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
237static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
238static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
239static void nv_adma_irq_clear(struct ata_port *ap);
240static int nv_adma_port_start(struct ata_port *ap);
241static void nv_adma_port_stop(struct ata_port *ap);
242static void nv_adma_error_handler(struct ata_port *ap);
243static void nv_adma_host_stop(struct ata_host *host);
244static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
245static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
246static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
247static u8 nv_adma_bmdma_status(struct ata_port *ap);
96 248
97enum nv_host_type 249enum nv_host_type
98{ 250{
99 GENERIC, 251 GENERIC,
100 NFORCE2, 252 NFORCE2,
101 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */ 253 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
102 CK804 254 CK804,
255 ADMA
103}; 256};
104 257
105static const struct pci_device_id nv_pci_tbl[] = { 258static const struct pci_device_id nv_pci_tbl[] = {
@@ -160,6 +313,24 @@ static struct scsi_host_template nv_sht = {
160 .bios_param = ata_std_bios_param, 313 .bios_param = ata_std_bios_param,
161}; 314};
162 315
316static struct scsi_host_template nv_adma_sht = {
317 .module = THIS_MODULE,
318 .name = DRV_NAME,
319 .ioctl = ata_scsi_ioctl,
320 .queuecommand = ata_scsi_queuecmd,
321 .can_queue = NV_ADMA_MAX_CPBS,
322 .this_id = ATA_SHT_THIS_ID,
323 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
324 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
325 .emulated = ATA_SHT_EMULATED,
326 .use_clustering = ATA_SHT_USE_CLUSTERING,
327 .proc_name = DRV_NAME,
328 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
329 .slave_configure = nv_adma_slave_config,
330 .slave_destroy = ata_scsi_slave_destroy,
331 .bios_param = ata_std_bios_param,
332};
333
163static const struct ata_port_operations nv_generic_ops = { 334static const struct ata_port_operations nv_generic_ops = {
164 .port_disable = ata_port_disable, 335 .port_disable = ata_port_disable,
165 .tf_load = ata_tf_load, 336 .tf_load = ata_tf_load,
@@ -241,11 +412,40 @@ static const struct ata_port_operations nv_ck804_ops = {
241 .host_stop = nv_ck804_host_stop, 412 .host_stop = nv_ck804_host_stop,
242}; 413};
243 414
415static const struct ata_port_operations nv_adma_ops = {
416 .port_disable = ata_port_disable,
417 .tf_load = ata_tf_load,
418 .tf_read = ata_tf_read,
419 .check_atapi_dma = nv_adma_check_atapi_dma,
420 .exec_command = ata_exec_command,
421 .check_status = ata_check_status,
422 .dev_select = ata_std_dev_select,
423 .bmdma_setup = nv_adma_bmdma_setup,
424 .bmdma_start = nv_adma_bmdma_start,
425 .bmdma_stop = nv_adma_bmdma_stop,
426 .bmdma_status = nv_adma_bmdma_status,
427 .qc_prep = nv_adma_qc_prep,
428 .qc_issue = nv_adma_qc_issue,
429 .freeze = nv_ck804_freeze,
430 .thaw = nv_ck804_thaw,
431 .error_handler = nv_adma_error_handler,
432 .post_internal_cmd = nv_adma_bmdma_stop,
433 .data_xfer = ata_mmio_data_xfer,
434 .irq_handler = nv_adma_interrupt,
435 .irq_clear = nv_adma_irq_clear,
436 .scr_read = nv_scr_read,
437 .scr_write = nv_scr_write,
438 .port_start = nv_adma_port_start,
439 .port_stop = nv_adma_port_stop,
440 .host_stop = nv_adma_host_stop,
441};
442
244static struct ata_port_info nv_port_info[] = { 443static struct ata_port_info nv_port_info[] = {
245 /* generic */ 444 /* generic */
246 { 445 {
247 .sht = &nv_sht, 446 .sht = &nv_sht,
248 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 447 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
448 ATA_FLAG_HRST_TO_RESUME,
249 .pio_mask = NV_PIO_MASK, 449 .pio_mask = NV_PIO_MASK,
250 .mwdma_mask = NV_MWDMA_MASK, 450 .mwdma_mask = NV_MWDMA_MASK,
251 .udma_mask = NV_UDMA_MASK, 451 .udma_mask = NV_UDMA_MASK,
@@ -254,7 +454,8 @@ static struct ata_port_info nv_port_info[] = {
254 /* nforce2/3 */ 454 /* nforce2/3 */
255 { 455 {
256 .sht = &nv_sht, 456 .sht = &nv_sht,
257 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 457 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
458 ATA_FLAG_HRST_TO_RESUME,
258 .pio_mask = NV_PIO_MASK, 459 .pio_mask = NV_PIO_MASK,
259 .mwdma_mask = NV_MWDMA_MASK, 460 .mwdma_mask = NV_MWDMA_MASK,
260 .udma_mask = NV_UDMA_MASK, 461 .udma_mask = NV_UDMA_MASK,
@@ -263,12 +464,23 @@ static struct ata_port_info nv_port_info[] = {
263 /* ck804 */ 464 /* ck804 */
264 { 465 {
265 .sht = &nv_sht, 466 .sht = &nv_sht,
266 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 467 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
468 ATA_FLAG_HRST_TO_RESUME,
267 .pio_mask = NV_PIO_MASK, 469 .pio_mask = NV_PIO_MASK,
268 .mwdma_mask = NV_MWDMA_MASK, 470 .mwdma_mask = NV_MWDMA_MASK,
269 .udma_mask = NV_UDMA_MASK, 471 .udma_mask = NV_UDMA_MASK,
270 .port_ops = &nv_ck804_ops, 472 .port_ops = &nv_ck804_ops,
271 }, 473 },
474 /* ADMA */
475 {
476 .sht = &nv_adma_sht,
477 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
478 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
479 .pio_mask = NV_PIO_MASK,
480 .mwdma_mask = NV_MWDMA_MASK,
481 .udma_mask = NV_UDMA_MASK,
482 .port_ops = &nv_adma_ops,
483 },
272}; 484};
273 485
274MODULE_AUTHOR("NVIDIA"); 486MODULE_AUTHOR("NVIDIA");
@@ -277,37 +489,220 @@ MODULE_LICENSE("GPL");
277MODULE_DEVICE_TABLE(pci, nv_pci_tbl); 489MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
278MODULE_VERSION(DRV_VERSION); 490MODULE_VERSION(DRV_VERSION);
279 491
280static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) 492static int adma_enabled = 1;
493
494static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
495 unsigned int port_no)
281{ 496{
282 struct ata_host *host = dev_instance; 497 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
283 unsigned int i; 498 return mmio;
284 unsigned int handled = 0; 499}
285 unsigned long flags;
286 500
287 spin_lock_irqsave(&host->lock, flags); 501static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
502{
503 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
504}
288 505
289 for (i = 0; i < host->n_ports; i++) { 506static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
290 struct ata_port *ap; 507{
508 return (ap->host->mmio_base + NV_ADMA_GEN);
509}
291 510
292 ap = host->ports[i]; 511static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
293 if (ap && 512{
294 !(ap->flags & ATA_FLAG_DISABLED)) { 513 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
295 struct ata_queued_cmd *qc; 514}
296 515
297 qc = ata_qc_from_tag(ap, ap->active_tag); 516static void nv_adma_register_mode(struct ata_port *ap)
298 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 517{
299 handled += ata_host_intr(ap, qc); 518 void __iomem *mmio = nv_adma_ctl_block(ap);
300 else 519 struct nv_adma_port_priv *pp = ap->private_data;
301 // No request pending? Clear interrupt status 520 u16 tmp;
302 // anyway, in case there's one pending. 521
303 ap->ops->check_status(ap); 522 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
304 } 523 return;
524
525 tmp = readw(mmio + NV_ADMA_CTL);
526 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
527
528 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
529}
530
531static void nv_adma_mode(struct ata_port *ap)
532{
533 void __iomem *mmio = nv_adma_ctl_block(ap);
534 struct nv_adma_port_priv *pp = ap->private_data;
535 u16 tmp;
305 536
537 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
538 return;
539
540 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
541
542 tmp = readw(mmio + NV_ADMA_CTL);
543 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
544
545 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
546}
547
548static int nv_adma_slave_config(struct scsi_device *sdev)
549{
550 struct ata_port *ap = ata_shost_to_port(sdev->host);
551 struct nv_adma_port_priv *pp = ap->private_data;
552 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
553 u64 bounce_limit;
554 unsigned long segment_boundary;
555 unsigned short sg_tablesize;
556 int rc;
557 int adma_enable;
558 u32 current_reg, new_reg, config_mask;
559
560 rc = ata_scsi_slave_config(sdev);
561
562 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
563 /* Not a proper libata device, ignore */
564 return rc;
565
566 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
567 /*
568 * NVIDIA reports that ADMA mode does not support ATAPI commands.
569 * Therefore ATAPI commands are sent through the legacy interface.
570 * However, the legacy interface only supports 32-bit DMA.
571 * Restrict DMA parameters as required by the legacy interface
572 * when an ATAPI device is connected.
573 */
574 bounce_limit = ATA_DMA_MASK;
575 segment_boundary = ATA_DMA_BOUNDARY;
576 /* Subtract 1 since an extra entry may be needed for padding, see
577 libata-scsi.c */
578 sg_tablesize = LIBATA_MAX_PRD - 1;
579
580 /* Since the legacy DMA engine is in use, we need to disable ADMA
581 on the port. */
582 adma_enable = 0;
583 nv_adma_register_mode(ap);
584 }
585 else {
586 bounce_limit = *ap->dev->dma_mask;
587 segment_boundary = NV_ADMA_DMA_BOUNDARY;
588 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
589 adma_enable = 1;
590 }
591
592 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
593
594 if(ap->port_no == 1)
595 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
596 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
597 else
598 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
599 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
600
601 if(adma_enable) {
602 new_reg = current_reg | config_mask;
603 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
604 }
605 else {
606 new_reg = current_reg & ~config_mask;
607 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
306 } 608 }
609
610 if(current_reg != new_reg)
611 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
612
613 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
614 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
615 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
616 ata_port_printk(ap, KERN_INFO,
617 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
618 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
619 return rc;
620}
307 621
308 spin_unlock_irqrestore(&host->lock, flags); 622static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
623{
624 struct nv_adma_port_priv *pp = qc->ap->private_data;
625 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
626}
309 627
310 return IRQ_RETVAL(handled); 628static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
629{
630 unsigned int idx = 0;
631
632 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
633
634 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
635 cpb[idx++] = cpu_to_le16(IGN);
636 cpb[idx++] = cpu_to_le16(IGN);
637 cpb[idx++] = cpu_to_le16(IGN);
638 cpb[idx++] = cpu_to_le16(IGN);
639 cpb[idx++] = cpu_to_le16(IGN);
640 }
641 else {
642 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
643 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
644 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
645 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
646 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
647 }
648 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
649 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
650 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
651 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
652 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
653
654 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
655
656 return idx;
657}
658
659static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
660{
661 struct nv_adma_port_priv *pp = ap->private_data;
662 int complete = 0, have_err = 0;
663 u8 flags = pp->cpb[cpb_num].resp_flags;
664
665 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
666
667 if (flags & NV_CPB_RESP_DONE) {
668 VPRINTK("CPB flags done, flags=0x%x\n", flags);
669 complete = 1;
670 }
671 if (flags & NV_CPB_RESP_ATA_ERR) {
672 ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
673 have_err = 1;
674 complete = 1;
675 }
676 if (flags & NV_CPB_RESP_CMD_ERR) {
677 ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
678 have_err = 1;
679 complete = 1;
680 }
681 if (flags & NV_CPB_RESP_CPB_ERR) {
682 ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
683 have_err = 1;
684 complete = 1;
685 }
686 if(complete || force_err)
687 {
688 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
689 if(likely(qc)) {
690 u8 ata_status = 0;
691 /* Only use the ATA port status for non-NCQ commands.
692 For NCQ commands the current status may have nothing to do with
693 the command just completed. */
694 if(qc->tf.protocol != ATA_PROT_NCQ)
695 ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
696
697 if(have_err || force_err)
698 ata_status |= ATA_ERR;
699
700 qc->err_mask |= ac_err_mask(ata_status);
701 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
702 qc->err_mask);
703 ata_qc_complete(qc);
704 }
705 }
311} 706}
312 707
313static int nv_host_intr(struct ata_port *ap, u8 irq_stat) 708static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
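
nv_adma_tf_to_cpb() above encodes the taskfile into the CPB as a list of 16-bit words, each carrying a shadow-register index in the high byte and the value in the low byte, with WNB set on the first (device select) entry and CMDEND on the final (command) entry. A standalone sketch of that encoding; the register indices and values used in main() are placeholders, not the kernel's ATA_REG_* constants.

#include <stdio.h>
#include <stdint.h>

#define EX_CMDEND	(1 << 15)	/* end of command list */
#define EX_WNB		(1 << 14)	/* wait-not-BSY before issuing */

static uint16_t cpb_entry(uint8_t reg, uint8_t val, uint16_t flags)
{
	return (uint16_t)(((unsigned int)reg << 8) | val | flags);
}

int main(void)
{
	/* placeholder indices: 0x06 "device", 0x07 "command" */
	uint16_t first = cpb_entry(0x06, 0x40, EX_WNB);
	uint16_t last  = cpb_entry(0x07, 0xC8, EX_CMDEND);

	printf("first entry 0x%04X, last entry 0x%04X\n",
	       (unsigned)first, (unsigned)last);
	return 0;
}
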
@@ -341,6 +736,486 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
341 return 1; 736 return 1;
342} 737}
343 738
739static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
740{
741 struct ata_host *host = dev_instance;
742 int i, handled = 0;
743 u32 notifier_clears[2];
744
745 spin_lock(&host->lock);
746
747 for (i = 0; i < host->n_ports; i++) {
748 struct ata_port *ap = host->ports[i];
749 notifier_clears[i] = 0;
750
751 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
752 struct nv_adma_port_priv *pp = ap->private_data;
753 void __iomem *mmio = nv_adma_ctl_block(ap);
754 u16 status;
755 u32 gen_ctl;
756 int have_global_err = 0;
757 u32 notifier, notifier_error;
758
759 /* if in ATA register mode, use standard ata interrupt handler */
760 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
761 u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
762 >> (NV_INT_PORT_SHIFT * i);
763 handled += nv_host_intr(ap, irq_stat);
764 continue;
765 }
766
767 notifier = readl(mmio + NV_ADMA_NOTIFIER);
768 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
769 notifier_clears[i] = notifier | notifier_error;
770
771 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
772
773 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
774 !notifier_error)
775 /* Nothing to do */
776 continue;
777
778 status = readw(mmio + NV_ADMA_STAT);
779
780 /* Clear status. Ensure the controller sees the clearing before we start
781 looking at any of the CPB statuses, so that any CPB completions after
782 this point in the handler will raise another interrupt. */
783 writew(status, mmio + NV_ADMA_STAT);
784 readw(mmio + NV_ADMA_STAT); /* flush posted write */
785 rmb();
786
787 /* freeze if hotplugged */
788 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
789 ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
790 ata_port_freeze(ap);
791 handled++;
792 continue;
793 }
794
795 if (status & NV_ADMA_STAT_TIMEOUT) {
796 ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
797 have_global_err = 1;
798 }
799 if (status & NV_ADMA_STAT_CPBERR) {
800 ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
801 have_global_err = 1;
802 }
803 if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
804 /** Check CPBs for completed commands */
805
806 if(ata_tag_valid(ap->active_tag))
807 /* Non-NCQ command */
808 nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
809 (notifier_error & (1 << ap->active_tag)));
810 else {
811 int pos;
812 u32 active = ap->sactive;
813 while( (pos = ffs(active)) ) {
814 pos--;
815 nv_adma_check_cpb(ap, pos, have_global_err ||
816 (notifier_error & (1 << pos)) );
817 active &= ~(1 << pos );
818 }
819 }
820 }
821
822 handled++; /* irq handled if we got here */
823 }
824 }
825
826 if(notifier_clears[0] || notifier_clears[1]) {
827 /* Note: Both notifier clear registers must be written
828 if either is set, even if one is zero, according to NVIDIA. */
829 writel(notifier_clears[0],
830 nv_adma_notifier_clear_block(host->ports[0]));
831 writel(notifier_clears[1],
832 nv_adma_notifier_clear_block(host->ports[1]));
833 }
834
835 spin_unlock(&host->lock);
836
837 return IRQ_RETVAL(handled);
838}
839
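Two details of the interrupt handler above are worth spelling out: NCQ completions are found by walking every bit set in ap->sactive with ffs(), and, per the comment, both notifier-clear registers are written whenever either has something to clear. A standalone sketch of the ffs()-based tag walk:

/* Standalone sketch of the ffs()-based sactive walk used for NCQ completion. */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>            /* ffs() */

static void check_tag(int tag)
{
        printf("check CPB for tag %d\n", tag);
}

static void walk_active_tags(uint32_t sactive)
{
        uint32_t active = sactive;
        int pos;

        while ((pos = ffs((int)active))) {
                pos--;                  /* ffs() is 1-based */
                check_tag(pos);
                active &= ~(1u << pos); /* drop the tag we just handled */
        }
}

int main(void)
{
        walk_active_tags(0x00000025);   /* tags 0, 2 and 5 outstanding */
        return 0;
}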
840static void nv_adma_irq_clear(struct ata_port *ap)
841{
842 void __iomem *mmio = nv_adma_ctl_block(ap);
843 u16 status = readw(mmio + NV_ADMA_STAT);
844 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
845 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
846 unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
847
848 /* clear ADMA status */
849 writew(status, mmio + NV_ADMA_STAT);
850 writel(notifier | notifier_error,
851 nv_adma_notifier_clear_block(ap));
852
853 /** clear legacy status */
854 outb(inb(dma_stat_addr), dma_stat_addr);
855}
856
857static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
858{
859 struct ata_port *ap = qc->ap;
860 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
861 struct nv_adma_port_priv *pp = ap->private_data;
862 u8 dmactl;
863
864 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
865 WARN_ON(1);
866 return;
867 }
868
869 /* load PRD table addr. */
870 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
871
872 /* specify data direction, triple-check start bit is clear */
873 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
874 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
875 if (!rw)
876 dmactl |= ATA_DMA_WR;
877
878 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
879
880 /* issue r/w command */
881 ata_exec_command(ap, &qc->tf);
882}
883
884static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
885{
886 struct ata_port *ap = qc->ap;
887 struct nv_adma_port_priv *pp = ap->private_data;
888 u8 dmactl;
889
890 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
891 WARN_ON(1);
892 return;
893 }
894
895 /* start host DMA transaction */
896 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
897 outb(dmactl | ATA_DMA_START,
898 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
899}
900
901static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
902{
903 struct ata_port *ap = qc->ap;
904 struct nv_adma_port_priv *pp = ap->private_data;
905
906 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
907 return;
908
909 /* clear start/stop bit */
910 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
911 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
912
913 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
914 ata_altstatus(ap); /* dummy read */
915}
916
917static u8 nv_adma_bmdma_status(struct ata_port *ap)
918{
919 struct nv_adma_port_priv *pp = ap->private_data;
920
921 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
922
923 return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
924}
925
926static int nv_adma_port_start(struct ata_port *ap)
927{
928 struct device *dev = ap->host->dev;
929 struct nv_adma_port_priv *pp;
930 int rc;
931 void *mem;
932 dma_addr_t mem_dma;
933 void __iomem *mmio = nv_adma_ctl_block(ap);
934 u16 tmp;
935
936 VPRINTK("ENTER\n");
937
938 rc = ata_port_start(ap);
939 if (rc)
940 return rc;
941
942 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
943 if (!pp) {
944 rc = -ENOMEM;
945 goto err_out;
946 }
947
948 mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
949 &mem_dma, GFP_KERNEL);
950
951 if (!mem) {
952 rc = -ENOMEM;
953 goto err_out_kfree;
954 }
955 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
956
957 /*
958 * First item in chunk of DMA memory:
959 * 128-byte command parameter block (CPB)
960 * one for each command tag
961 */
962 pp->cpb = mem;
963 pp->cpb_dma = mem_dma;
964
965 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
966 writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
967
968 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
969 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
970
971 /*
972 * Second item: block of ADMA_SGTBL_LEN s/g entries
973 */
974 pp->aprd = mem;
975 pp->aprd_dma = mem_dma;
976
977 ap->private_data = pp;
978
979 /* clear any outstanding interrupt conditions */
980 writew(0xffff, mmio + NV_ADMA_STAT);
981
982 /* initialize port variables */
983 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
984
985 /* clear CPB fetch count */
986 writew(0, mmio + NV_ADMA_CPB_COUNT);
987
988 /* clear GO for register mode */
989 tmp = readw(mmio + NV_ADMA_CTL);
990 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
991
992 tmp = readw(mmio + NV_ADMA_CTL);
993 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
994 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
995 udelay(1);
996 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
997 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
998
999 return 0;
1000
1001err_out_kfree:
1002 kfree(pp);
1003err_out:
1004 ata_port_stop(ap);
1005 return rc;
1006}
1007
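The port-start path above carves one coherent DMA allocation into the CPB array (one fixed-size CPB per tag) followed by the per-tag scatter/gather tables, and programs the CPB base as two 32-bit halves; writing the high half as (mem_dma >> 16) >> 16 keeps the shift defined even when dma_addr_t is only 32 bits wide. A standalone sketch of that layout, with placeholder sizes rather than the real NV_ADMA_* constants:

/* Sketch of the CPB/SG-table layout and the 64-bit base split used above.
 * MAX_CPBS and CPB_SZ are placeholders for the NV_ADMA_* constants. */
#include <stdint.h>
#include <stdio.h>

#define MAX_CPBS  32
#define CPB_SZ    128

int main(void)
{
        uint64_t mem_dma = 0x0000000123456000ULL;   /* example bus address */

        /* the two halves written to NV_ADMA_CPB_BASE_LOW/HIGH */
        uint32_t low  = (uint32_t)(mem_dma & 0xFFFFFFFFu);
        uint32_t high = (uint32_t)((mem_dma >> 16) >> 16);

        /* scatter/gather tables start right after the CPB array */
        uint64_t aprd_dma = mem_dma + (uint64_t)MAX_CPBS * CPB_SZ;

        printf("CPB base: low=0x%08x high=0x%08x\n", low, high);
        printf("APRD tables at 0x%llx\n", (unsigned long long)aprd_dma);
        return 0;
}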
1008static void nv_adma_port_stop(struct ata_port *ap)
1009{
1010 struct device *dev = ap->host->dev;
1011 struct nv_adma_port_priv *pp = ap->private_data;
1012 void __iomem *mmio = nv_adma_ctl_block(ap);
1013
1014 VPRINTK("ENTER\n");
1015
1016 writew(0, mmio + NV_ADMA_CTL);
1017
1018 ap->private_data = NULL;
1019 dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
1020 kfree(pp);
1021 ata_port_stop(ap);
1022}
1023
1024
1025static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1026{
1027 void __iomem *mmio = probe_ent->mmio_base;
1028 struct ata_ioports *ioport = &probe_ent->port[port];
1029
1030 VPRINTK("ENTER\n");
1031
1032 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1033
1034 ioport->cmd_addr = (unsigned long) mmio;
1035 ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
1036 ioport->error_addr =
1037 ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
1038 ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
1039 ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
1040 ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
1041 ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
1042 ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
1043 ioport->status_addr =
1044 ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
1045 ioport->altstatus_addr =
1046 ioport->ctl_addr = (unsigned long) mmio + 0x20;
1047}
1048
1049static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1050{
1051 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1052 unsigned int i;
1053 u32 tmp32;
1054
1055 VPRINTK("ENTER\n");
1056
1057 /* enable ADMA on the ports */
1058 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1059 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1060 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1061 NV_MCP_SATA_CFG_20_PORT1_EN |
1062 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1063
1064 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1065
1066 for (i = 0; i < probe_ent->n_ports; i++)
1067 nv_adma_setup_port(probe_ent, i);
1068
1069 for (i = 0; i < probe_ent->n_ports; i++) {
1070 void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
1071 u16 tmp;
1072
1073 /* enable interrupt, clear reset if not already clear */
1074 tmp = readw(mmio + NV_ADMA_CTL);
1075 writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1076 }
1077
1078 return 0;
1079}
1080
1081static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1082 struct scatterlist *sg,
1083 int idx,
1084 struct nv_adma_prd *aprd)
1085{
1086 u8 flags;
1087
1088 memset(aprd, 0, sizeof(struct nv_adma_prd));
1089
1090 flags = 0;
1091 if (qc->tf.flags & ATA_TFLAG_WRITE)
1092 flags |= NV_APRD_WRITE;
1093 if (idx == qc->n_elem - 1)
1094 flags |= NV_APRD_END;
1095 else if (idx != 4)
1096 flags |= NV_APRD_CONT;
1097
1098 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1099 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1100 aprd->flags = flags;
1101}
1102
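The flag logic above marks the final scatter/gather entry END and every other entry CONT, except entry 4, the last of the five APRD slots embedded in the CPB; presumably the jump to the external table is described by the CPB's next_aprd pointer rather than a CONT flag. A standalone sketch of that selection, with placeholder flag values:

/* Sketch of the APRD flag selection in nv_adma_fill_aprd().
 * The flag bit values are placeholders, not the hardware encoding. */
#include <stdint.h>
#include <stdio.h>

#define APRD_WRITE 0x01
#define APRD_END   0x02
#define APRD_CONT  0x04

static uint8_t aprd_flags(int idx, int n_elem, int is_write)
{
        uint8_t flags = 0;

        if (is_write)
                flags |= APRD_WRITE;
        if (idx == n_elem - 1)
                flags |= APRD_END;      /* last s/g entry overall */
        else if (idx != 4)
                flags |= APRD_CONT;     /* idx 4 is the last in-CPB slot */
        return flags;
}

int main(void)
{
        for (int i = 0; i < 7; i++)
                printf("entry %d: flags=0x%02x\n", i, aprd_flags(i, 7, 1));
        return 0;
}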
1103static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1104{
1105 struct nv_adma_port_priv *pp = qc->ap->private_data;
1106 unsigned int idx;
1107 struct nv_adma_prd *aprd;
1108 struct scatterlist *sg;
1109
1110 VPRINTK("ENTER\n");
1111
1112 idx = 0;
1113
1114 ata_for_each_sg(sg, qc) {
1115 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1116 nv_adma_fill_aprd(qc, sg, idx, aprd);
1117 idx++;
1118 }
1119 if (idx > 5)
1120 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1121}
1122
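nv_adma_fill_sg() above places the first five scatter/gather entries in the CPB itself and spills the rest into that tag's slice of the shared APRD table, pointing next_aprd at the slice only when it is actually used. A standalone sketch of the slot selection; the per-tag table length is illustrative:

/* Sketch of the inline-vs-external APRD placement in nv_adma_fill_sg().
 * SGTBL_LEN is illustrative, not the driver's NV_ADMA_SGTBL_LEN. */
#include <stdio.h>

#define INLINE_APRDS 5          /* APRD slots embedded in each CPB */
#define SGTBL_LEN    16         /* illustrative per-tag external table length */

static void place_entry(int tag, int idx)
{
        if (idx < INLINE_APRDS)
                printf("entry %d -> cpb->aprd[%d]\n", idx, idx);
        else
                printf("entry %d -> external aprd[%d]\n",
                       idx, SGTBL_LEN * tag + (idx - INLINE_APRDS));
}

int main(void)
{
        int tag = 3, n_elem = 8;

        for (int idx = 0; idx < n_elem; idx++)
                place_entry(tag, idx);
        if (n_elem > INLINE_APRDS)
                printf("cpb->next_aprd -> aprd_dma + SGTBL_SZ * %d\n", tag);
        return 0;
}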
1123static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1124{
1125 struct nv_adma_port_priv *pp = qc->ap->private_data;
1126 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1127 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1128 NV_CPB_CTL_APRD_VALID |
1129 NV_CPB_CTL_IEN;
1130
1131 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1132
1133 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1134 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1135 nv_adma_register_mode(qc->ap);
1136 ata_qc_prep(qc);
1137 return;
1138 }
1139
1140 memset(cpb, 0, sizeof(struct nv_adma_cpb));
1141
1142 cpb->len = 3;
1143 cpb->tag = qc->tag;
1144 cpb->next_cpb_idx = 0;
1145
1146 /* turn on NCQ flags for NCQ commands */
1147 if (qc->tf.protocol == ATA_PROT_NCQ)
1148 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1149
1150 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1151
1152 nv_adma_fill_sg(qc, cpb);
1153
1154 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1155 finished filling in all of the contents */
1156 wmb();
1157 cpb->ctl_flags = ctl_flags;
1158}
1159
1160static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1161{
1162 struct nv_adma_port_priv *pp = qc->ap->private_data;
1163 void __iomem *mmio = nv_adma_ctl_block(qc->ap);
1164
1165 VPRINTK("ENTER\n");
1166
1167 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1168 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1169 /* use ATA register mode */
1170 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
1171 nv_adma_register_mode(qc->ap);
1172 return ata_qc_issue_prot(qc);
1173 } else
1174 nv_adma_mode(qc->ap);
1175
1176 /* write append register, command tag in lower 8 bits
1177 and (number of cpbs to append -1) in top 8 bits */
1178 wmb();
1179 writew(qc->tag, mmio + NV_ADMA_APPEND);
1180
1181 DPRINTK("Issued tag %u\n",qc->tag);
1182
1183 return 0;
1184}
1185
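Per the comment in nv_adma_qc_issue(), a write to the APPEND register carries the first tag in the low byte and the number of CPBs being appended minus one in the high byte, so issuing a single command is just a write of the tag. A one-function sketch of that encoding:

/* Sketch of the NV_ADMA_APPEND encoding described in nv_adma_qc_issue(). */
#include <stdint.h>
#include <stdio.h>

static uint16_t adma_append_word(unsigned int tag, unsigned int n_cpbs)
{
        /* low 8 bits: command tag; high 8 bits: number of CPBs - 1 */
        return (uint16_t)(((n_cpbs - 1) << 8) | (tag & 0xff));
}

int main(void)
{
        printf("append one CPB, tag 5:  0x%04x\n", adma_append_word(5, 1));
        printf("append two CPBs, tag 5: 0x%04x\n", adma_append_word(5, 2));
        return 0;
}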
1186static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1187{
1188 struct ata_host *host = dev_instance;
1189 unsigned int i;
1190 unsigned int handled = 0;
1191 unsigned long flags;
1192
1193 spin_lock_irqsave(&host->lock, flags);
1194
1195 for (i = 0; i < host->n_ports; i++) {
1196 struct ata_port *ap;
1197
1198 ap = host->ports[i];
1199 if (ap &&
1200 !(ap->flags & ATA_FLAG_DISABLED)) {
1201 struct ata_queued_cmd *qc;
1202
1203 qc = ata_qc_from_tag(ap, ap->active_tag);
1204 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1205 handled += ata_host_intr(ap, qc);
1206 else
1207 // No request pending? Clear interrupt status
1208 // anyway, in case there's one pending.
1209 ap->ops->check_status(ap);
1210 }
1211
1212 }
1213
1214 spin_unlock_irqrestore(&host->lock, flags);
1215
1216 return IRQ_RETVAL(handled);
1217}
1218
344static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat) 1219static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
345{ 1220{
346 int i, handled = 0; 1221 int i, handled = 0;
@@ -466,6 +1341,56 @@ static void nv_error_handler(struct ata_port *ap)
466 nv_hardreset, ata_std_postreset); 1341 nv_hardreset, ata_std_postreset);
467} 1342}
468 1343
1344static void nv_adma_error_handler(struct ata_port *ap)
1345{
1346 struct nv_adma_port_priv *pp = ap->private_data;
1347 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1348 void __iomem *mmio = nv_adma_ctl_block(ap);
1349 int i;
1350 u16 tmp;
1351
1352 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1353 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1354 u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
1355 u32 status = readw(mmio + NV_ADMA_STAT);
1356
1357 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1358 "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
1359 notifier, notifier_error, gen_ctl, status);
1360
1361 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1362 struct nv_adma_cpb *cpb = &pp->cpb[i];
1363 if( cpb->ctl_flags || cpb->resp_flags )
1364 ata_port_printk(ap, KERN_ERR,
1365 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1366 i, cpb->ctl_flags, cpb->resp_flags);
1367 }
1368
1369 /* Push us back into port register mode for error handling. */
1370 nv_adma_register_mode(ap);
1371
1372 ata_port_printk(ap, KERN_ERR, "Resetting port\n");
1373
1374 /* Mark all of the CPBs as invalid to prevent them from being executed */
1375 for( i=0;i<NV_ADMA_MAX_CPBS;i++)
1376 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1377
1378 /* clear CPB fetch count */
1379 writew(0, mmio + NV_ADMA_CPB_COUNT);
1380
1381 /* Reset channel */
1382 tmp = readw(mmio + NV_ADMA_CTL);
1383 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1384 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1385 udelay(1);
1386 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1387 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1388 }
1389
1390 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1391 nv_hardreset, ata_std_postreset);
1392}
1393
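The ADMA error handler drops the port back to register mode, invalidates every CPB, zeroes the fetch count, and then pulses the channel-reset bit with a posted-write flush and a 1 us delay between assert and de-assert, the same sequence nv_adma_port_start() uses. A standalone sketch of that pulse; the register offset, bit value and MMIO/delay helpers are stand-ins, not the kernel's readw/writew/udelay:

/* Sketch of the channel-reset pulse. Offsets, bits and the MMIO/delay
 * helpers below are stand-ins for the real register map and kernel APIs. */
#include <stdint.h>
#include <stdio.h>

#define ADMA_CTL                0x40    /* placeholder register offset */
#define ADMA_CTL_CHANNEL_RESET  0x0002  /* placeholder bit */

static uint16_t regs[0x100 / 2];        /* fake register file */

static uint16_t mmio_readw(unsigned int off)              { return regs[off / 2]; }
static void     mmio_writew(uint16_t v, unsigned int off) { regs[off / 2] = v; }
static void     flush_posted_write(void)    { (void)mmio_readw(ADMA_CTL); }
static void     delay_us(unsigned int us)   { (void)us; }

static void channel_reset_pulse(void)
{
        uint16_t tmp = mmio_readw(ADMA_CTL);

        mmio_writew(tmp | ADMA_CTL_CHANNEL_RESET, ADMA_CTL);
        flush_posted_write();
        delay_us(1);
        mmio_writew(tmp & ~ADMA_CTL_CHANNEL_RESET, ADMA_CTL);
        flush_posted_write();
}

int main(void)
{
        channel_reset_pulse();
        printf("CTL after pulse: 0x%04x\n", mmio_readw(ADMA_CTL));
        return 0;
}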
469static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 1394static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
470{ 1395{
471 static int printed_version = 0; 1396 static int printed_version = 0;
@@ -475,6 +1400,8 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
475 int rc; 1400 int rc;
476 u32 bar; 1401 u32 bar;
477 unsigned long base; 1402 unsigned long base;
1403 unsigned long type = ent->driver_data;
1404 int mask_set = 0;
478 1405
479 // Make sure this is a SATA controller by counting the number of bars 1406 // Make sure this is a SATA controller by counting the number of bars
480 // (NVIDIA SATA controllers will always have six bars). Otherwise, 1407 // (NVIDIA SATA controllers will always have six bars). Otherwise,
@@ -483,7 +1410,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
483 if (pci_resource_start(pdev, bar) == 0) 1410 if (pci_resource_start(pdev, bar) == 0)
484 return -ENODEV; 1411 return -ENODEV;
485 1412
486 if (!printed_version++) 1413 if ( !printed_version++)
487 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1414 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
488 1415
489 rc = pci_enable_device(pdev); 1416 rc = pci_enable_device(pdev);
@@ -496,16 +1423,26 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
496 goto err_out_disable; 1423 goto err_out_disable;
497 } 1424 }
498 1425
499 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1426 if(type >= CK804 && adma_enabled) {
500 if (rc) 1427 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
501 goto err_out_regions; 1428 type = ADMA;
502 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 1429 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
503 if (rc) 1430 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
504 goto err_out_regions; 1431 mask_set = 1;
1432 }
1433
1434 if(!mask_set) {
1435 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1436 if (rc)
1437 goto err_out_regions;
1438 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1439 if (rc)
1440 goto err_out_regions;
1441 }
505 1442
506 rc = -ENOMEM; 1443 rc = -ENOMEM;
507 1444
508 ppi[0] = ppi[1] = &nv_port_info[ent->driver_data]; 1445 ppi[0] = ppi[1] = &nv_port_info[type];
509 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 1446 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
510 if (!probe_ent) 1447 if (!probe_ent)
511 goto err_out_regions; 1448 goto err_out_regions;
@@ -522,7 +1459,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
522 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET; 1459 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
523 1460
524 /* enable SATA space for CK804 */ 1461 /* enable SATA space for CK804 */
525 if (ent->driver_data == CK804) { 1462 if (type >= CK804) {
526 u8 regval; 1463 u8 regval;
527 1464
528 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval); 1465 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
@@ -532,6 +1469,12 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
532 1469
533 pci_set_master(pdev); 1470 pci_set_master(pdev);
534 1471
1472 if (type == ADMA) {
1473 rc = nv_adma_host_init(probe_ent);
1474 if (rc)
1475 goto err_out_iounmap;
1476 }
1477
535 rc = ata_device_add(probe_ent); 1478 rc = ata_device_add(probe_ent);
536 if (rc != NV_PORTS) 1479 if (rc != NV_PORTS)
537 goto err_out_iounmap; 1480 goto err_out_iounmap;
@@ -566,6 +1509,33 @@ static void nv_ck804_host_stop(struct ata_host *host)
566 ata_pci_host_stop(host); 1509 ata_pci_host_stop(host);
567} 1510}
568 1511
1512static void nv_adma_host_stop(struct ata_host *host)
1513{
1514 struct pci_dev *pdev = to_pci_dev(host->dev);
1515 int i;
1516 u32 tmp32;
1517
1518 for (i = 0; i < host->n_ports; i++) {
1519 void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
1520 u16 tmp;
1521
1522 /* disable interrupt */
1523 tmp = readw(mmio + NV_ADMA_CTL);
1524 writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1525 }
1526
1527 /* disable ADMA on the ports */
1528 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1529 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1530 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1531 NV_MCP_SATA_CFG_20_PORT1_EN |
1532 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1533
1534 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1535
1536 nv_ck804_host_stop(host);
1537}
1538
569static int __init nv_init(void) 1539static int __init nv_init(void)
570{ 1540{
571 return pci_register_driver(&nv_pci_driver); 1541 return pci_register_driver(&nv_pci_driver);
@@ -578,3 +1548,5 @@ static void __exit nv_exit(void)
578 1548
579module_init(nv_init); 1549module_init(nv_init);
580module_exit(nv_exit); 1550module_exit(nv_exit);
1551module_param_named(adma, adma_enabled, bool, 0444);
1552MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
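With the new "adma" parameter defaulting to on, nv_init_one() first tries 64-bit streaming and consistent DMA masks on ADMA-capable (CK804 and later) controllers and falls back to the legacy 32-bit ATA mask if either call fails; loading with adma=0 keeps the driver on the pre-existing path. A standalone sketch of that fallback, where the two set_*_mask() helpers stand in for pci_set_dma_mask()/pci_set_consistent_dma_mask():

/* Sketch of the DMA-mask fallback added to nv_init_one(). The set_*_mask()
 * helpers are stand-ins for the PCI DMA mask calls and always succeed here. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_64BIT_MASK  0xffffffffffffffffULL
#define ATA_DMA_MASK    0x00000000ffffffffULL

static int set_streaming_mask(uint64_t mask)  { (void)mask; return 0; }
static int set_consistent_mask(uint64_t mask) { (void)mask; return 0; }

static int pick_dma_mask(bool adma_capable, bool adma_enabled)
{
        if (adma_capable && adma_enabled &&
            !set_streaming_mask(DMA_64BIT_MASK) &&
            !set_consistent_mask(DMA_64BIT_MASK)) {
                printf("using 64-bit DMA masks\n");
                return 0;
        }
        if (set_streaming_mask(ATA_DMA_MASK) ||
            set_consistent_mask(ATA_DMA_MASK))
                return -1;
        printf("falling back to 32-bit ATA DMA masks\n");
        return 0;
}

int main(void)
{
        return pick_dma_mask(true, true);
}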
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 72eda5160fad..a2778cf016bc 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -46,20 +46,19 @@
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
49#define DRV_VERSION "1.04" 49#define DRV_VERSION "1.05"
50 50
51 51
52enum { 52enum {
53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ 53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ 54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
55 PDC_TBG_MODE = 0x41, /* TBG mode */
56 PDC_FLASH_CTL = 0x44, /* Flash control register */ 55 PDC_FLASH_CTL = 0x44, /* Flash control register */
57 PDC_PCI_CTL = 0x48, /* PCI control and status register */
58 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */ 56 PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
59 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */ 57 PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
60 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ 58 PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
61 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */ 59 PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
62 PDC_SLEW_CTL = 0x470, /* slew rate control reg */ 60 PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */
61 PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
63 62
64 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | 63 PDC_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
65 (1<<8) | (1<<9) | (1<<10), 64 (1<<8) | (1<<9) | (1<<10),
@@ -78,6 +77,9 @@ enum {
78 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | 77 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
79 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 78 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
80 ATA_FLAG_PIO_POLLING, 79 ATA_FLAG_PIO_POLLING,
80
81 /* hp->flags bits */
82 PDC_FLAG_GEN_II = (1 << 0),
81}; 83};
82 84
83 85
@@ -87,6 +89,7 @@ struct pdc_port_priv {
87}; 89};
88 90
89struct pdc_host_priv { 91struct pdc_host_priv {
92 unsigned long flags;
90 int hotplug_offset; 93 int hotplug_offset;
91}; 94};
92 95
@@ -235,20 +238,20 @@ static const struct ata_port_info pdc_port_info[] = {
235 238
236static const struct pci_device_id pdc_ata_pci_tbl[] = { 239static const struct pci_device_id pdc_ata_pci_tbl[] = {
237 { PCI_VDEVICE(PROMISE, 0x3371), board_2037x }, 240 { PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
238 { PCI_VDEVICE(PROMISE, 0x3570), board_2037x },
239 { PCI_VDEVICE(PROMISE, 0x3571), board_2037x },
240 { PCI_VDEVICE(PROMISE, 0x3373), board_2037x }, 241 { PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
241 { PCI_VDEVICE(PROMISE, 0x3375), board_2037x }, 242 { PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
242 { PCI_VDEVICE(PROMISE, 0x3376), board_2037x }, 243 { PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
244 { PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
245 { PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
243 { PCI_VDEVICE(PROMISE, 0x3574), board_2057x }, 246 { PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
247 { PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
244 { PCI_VDEVICE(PROMISE, 0x3d75), board_2057x }, 248 { PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
245 { PCI_VDEVICE(PROMISE, 0x3d73), board_2037x },
246 249
247 { PCI_VDEVICE(PROMISE, 0x3318), board_20319 }, 250 { PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
248 { PCI_VDEVICE(PROMISE, 0x3319), board_20319 }, 251 { PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
249 { PCI_VDEVICE(PROMISE, 0x3515), board_20319 }, 252 { PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
250 { PCI_VDEVICE(PROMISE, 0x3519), board_20319 }, 253 { PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
251 { PCI_VDEVICE(PROMISE, 0x3d17), board_20319 }, 254 { PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
252 { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 }, 255 { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
253 256
254 { PCI_VDEVICE(PROMISE, 0x6629), board_20619 }, 257 { PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
@@ -277,6 +280,7 @@ static struct pci_driver pdc_ata_pci_driver = {
277static int pdc_port_start(struct ata_port *ap) 280static int pdc_port_start(struct ata_port *ap)
278{ 281{
279 struct device *dev = ap->host->dev; 282 struct device *dev = ap->host->dev;
283 struct pdc_host_priv *hp = ap->host->private_data;
280 struct pdc_port_priv *pp; 284 struct pdc_port_priv *pp;
281 int rc; 285 int rc;
282 286
@@ -298,6 +302,16 @@ static int pdc_port_start(struct ata_port *ap)
298 302
299 ap->private_data = pp; 303 ap->private_data = pp;
300 304
305 /* fix up PHYMODE4 align timing */
306 if ((hp->flags & PDC_FLAG_GEN_II) && sata_scr_valid(ap)) {
307 void __iomem *mmio = (void __iomem *) ap->ioaddr.scr_addr;
308 unsigned int tmp;
309
310 tmp = readl(mmio + 0x014);
311 tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */
312 writel(tmp, mmio + 0x014);
313 }
314
301 return 0; 315 return 0;
302 316
303err_out_kfree: 317err_out_kfree:
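The PHYMODE4 "align timing" fix-up added to pdc_port_start() is a plain read-modify-write: on second-generation chips with a valid SCR block, bits 1:0 of the register at offset 0x14 are forced to 0:1. A trivial standalone sketch of the bit manipulation:

/* Sketch of the PHYMODE4 bits 1:0 fix-up applied in pdc_port_start(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t fix_phymode4(uint32_t reg)
{
        return (reg & ~3u) | 1u;        /* force bits 1:0 to 0:1 */
}

int main(void)
{
        uint32_t before = 0xdeadbee7;   /* example register content */

        printf("0x%08x -> 0x%08x\n", before, fix_phymode4(before));
        return 0;
}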
@@ -640,9 +654,11 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
640 * "TODO: figure out why we do this" 654 * "TODO: figure out why we do this"
641 */ 655 */
642 656
643 /* change FIFO_SHD to 8 dwords, enable BMR_BURST */ 657 /* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
644 tmp = readl(mmio + PDC_FLASH_CTL); 658 tmp = readl(mmio + PDC_FLASH_CTL);
645 tmp |= 0x12000; /* bit 16 (fifo 8 dw) and 13 (bmr burst?) */ 659 tmp |= 0x02000; /* bit 13 (enable bmr burst) */
660 if (!(hp->flags & PDC_FLAG_GEN_II))
661 tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */
646 writel(tmp, mmio + PDC_FLASH_CTL); 662 writel(tmp, mmio + PDC_FLASH_CTL);
647 663
648 /* clear plug/unplug flags for all ports */ 664 /* clear plug/unplug flags for all ports */
@@ -653,6 +669,10 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
653 tmp = readl(mmio + hotplug_offset); 669 tmp = readl(mmio + hotplug_offset);
654 writel(tmp | 0xff0000, mmio + hotplug_offset); 670 writel(tmp | 0xff0000, mmio + hotplug_offset);
655 671
672 /* don't initialise TBG or SLEW on 2nd generation chips */
673 if (hp->flags & PDC_FLAG_GEN_II)
674 return;
675
656 /* reduce TBG clock to 133 Mhz. */ 676 /* reduce TBG clock to 133 Mhz. */
657 tmp = readl(mmio + PDC_TBG_MODE); 677 tmp = readl(mmio + PDC_TBG_MODE);
658 tmp &= ~0x30000; /* clear bit 17, 16*/ 678 tmp &= ~0x30000; /* clear bit 17, 16*/
@@ -746,6 +766,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
746 /* notice 4-port boards */ 766 /* notice 4-port boards */
747 switch (board_idx) { 767 switch (board_idx) {
748 case board_40518: 768 case board_40518:
769 hp->flags |= PDC_FLAG_GEN_II;
749 /* Override hotplug offset for SATAII150 */ 770 /* Override hotplug offset for SATAII150 */
750 hp->hotplug_offset = PDC2_SATA_PLUG_CSR; 771 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
751 /* Fall through */ 772 /* Fall through */
@@ -759,15 +780,14 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
759 probe_ent->port[3].scr_addr = base + 0x700; 780 probe_ent->port[3].scr_addr = base + 0x700;
760 break; 781 break;
761 case board_2057x: 782 case board_2057x:
783 case board_20771:
784 hp->flags |= PDC_FLAG_GEN_II;
762 /* Override hotplug offset for SATAII150 */ 785 /* Override hotplug offset for SATAII150 */
763 hp->hotplug_offset = PDC2_SATA_PLUG_CSR; 786 hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
764 /* Fall through */ 787 /* Fall through */
765 case board_2037x: 788 case board_2037x:
766 probe_ent->n_ports = 2; 789 probe_ent->n_ports = 2;
767 break; 790 break;
768 case board_20771:
769 probe_ent->n_ports = 2;
770 break;
771 case board_20619: 791 case board_20619:
772 probe_ent->n_ports = 4; 792 probe_ent->n_ports = 4;
773 793
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index ca8d99312472..7808d0369d91 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -356,6 +356,7 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
356 356
357static void sil_host_intr(struct ata_port *ap, u32 bmdma2) 357static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
358{ 358{
359 struct ata_eh_info *ehi = &ap->eh_info;
359 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag); 360 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
360 u8 status; 361 u8 status;
361 362
@@ -428,6 +429,10 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
428 /* kick HSM in the ass */ 429 /* kick HSM in the ass */
429 ata_hsm_move(ap, qc, status, 0); 430 ata_hsm_move(ap, qc, status, 0);
430 431
432 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
433 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
434 ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
435
431 return; 436 return;
432 437
433 err_hsm: 438 err_hsm:
@@ -534,6 +539,7 @@ static void sil_thaw(struct ata_port *ap)
534 */ 539 */
535static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) 540static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
536{ 541{
542 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
537 unsigned int n, quirks = 0; 543 unsigned int n, quirks = 0;
538 unsigned char model_num[41]; 544 unsigned char model_num[41];
539 545
@@ -549,16 +555,18 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
549 if (slow_down || 555 if (slow_down ||
550 ((ap->flags & SIL_FLAG_MOD15WRITE) && 556 ((ap->flags & SIL_FLAG_MOD15WRITE) &&
551 (quirks & SIL_QUIRK_MOD15WRITE))) { 557 (quirks & SIL_QUIRK_MOD15WRITE))) {
552 ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix " 558 if (print_info)
553 "(mod15write workaround)\n"); 559 ata_dev_printk(dev, KERN_INFO, "applying Seagate "
560 "errata fix (mod15write workaround)\n");
554 dev->max_sectors = 15; 561 dev->max_sectors = 15;
555 return; 562 return;
556 } 563 }
557 564
558 /* limit to udma5 */ 565 /* limit to udma5 */
559 if (quirks & SIL_QUIRK_UDMA5MAX) { 566 if (quirks & SIL_QUIRK_UDMA5MAX) {
560 ata_dev_printk(dev, KERN_INFO, 567 if (print_info)
561 "applying Maxtor errata fix %s\n", model_num); 568 ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
569 "errata fix %s\n", model_num);
562 dev->udma_mask &= ATA_UDMA5; 570 dev->udma_mask &= ATA_UDMA5;
563 return; 571 return;
564 } 572 }
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 169e200a6a71..5aa288d2fb86 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -100,10 +100,14 @@ enum {
100 */ 100 */
101 PORT_REGS_SIZE = 0x2000, 101 PORT_REGS_SIZE = 0x2000,
102 102
103 PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */ 103 PORT_LRAM = 0x0000, /* 31 LRAM slots and PMP regs */
104 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */ 104 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
105 105
106 PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */ 106 PORT_PMP = 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
107 PORT_PMP_STATUS = 0x0000, /* port device status offset */
108 PORT_PMP_QACTIVE = 0x0004, /* port device QActive offset */
109 PORT_PMP_SIZE = 0x0008, /* 8 bytes per PMP */
110
107 /* 32 bit regs */ 111 /* 32 bit regs */
108 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */ 112 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
109 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */ 113 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
@@ -126,6 +130,7 @@ enum {
126 PORT_PHY_CFG = 0x1050, 130 PORT_PHY_CFG = 0x1050,
127 PORT_SLOT_STAT = 0x1800, 131 PORT_SLOT_STAT = 0x1800,
128 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */ 132 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
133 PORT_CONTEXT = 0x1e04,
129 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */ 134 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
130 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */ 135 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
131 PORT_SCONTROL = 0x1f00, 136 PORT_SCONTROL = 0x1f00,
@@ -139,9 +144,9 @@ enum {
139 PORT_CS_INIT = (1 << 2), /* port initialize */ 144 PORT_CS_INIT = (1 << 2), /* port initialize */
140 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */ 145 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
141 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */ 146 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
142 PORT_CS_RESUME = (1 << 6), /* port resume */ 147 PORT_CS_PMP_RESUME = (1 << 6), /* PMP resume */
143 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */ 148 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
144 PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */ 149 PORT_CS_PMP_EN = (1 << 13), /* port multiplier enable */
145 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */ 150 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
146 151
147 /* PORT_IRQ_STAT/ENABLE_SET/CLR */ 152 /* PORT_IRQ_STAT/ENABLE_SET/CLR */
@@ -562,7 +567,7 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
562 567
563 /* do SRST */ 568 /* do SRST */
564 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); 569 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
565 prb->fis[1] = 0; /* no PM yet */ 570 prb->fis[1] = 0; /* no PMP yet */
566 571
567 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 572 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
568 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); 573 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
@@ -1050,7 +1055,8 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
1050 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); 1055 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
1051 1056
1052 /* Clear port multiplier enable and resume bits */ 1057 /* Clear port multiplier enable and resume bits */
1053 writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); 1058 writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME,
1059 port + PORT_CTRL_CLR);
1054 } 1060 }
1055 1061
1056 /* Turn on interrupts */ 1062 /* Turn on interrupts */
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 9d1235ba06b1..9c25a1e91730 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -173,7 +173,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) 173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
174 pci_read_config_dword(pdev, cfg_addr+0x10, &val2); 174 pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
175 175
176 return val|val2; 176 return (val|val2) & 0xfffffffb; /* avoid problems with powerdowned ports */
177} 177}
178 178
179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) 179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
@@ -212,7 +212,7 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) 212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); 213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
214 214
215 return val | val2; 215 return (val | val2) & 0xfffffffb;
216} 216}
217 217
218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 218static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
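Both SCR read paths in sata_sis now mask the merged value with 0xfffffffb, clearing bit 2 so that, per the comment, powered-down ports do not confuse the layers above. A quick standalone check of what the mask strips:

/* Standalone check of the 0xfffffffb mask applied to the merged SCR value. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t val = 0x00000113, val2 = 0x00000004;   /* example raw reads */
        uint32_t merged = val | val2;
        uint32_t masked = merged & 0xfffffffbu;         /* clears bit 2 */

        printf("merged 0x%08x -> reported 0x%08x\n", merged, masked);
        return 0;
}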
@@ -239,7 +239,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
239 static int printed_version; 239 static int printed_version;
240 struct ata_probe_ent *probe_ent = NULL; 240 struct ata_probe_ent *probe_ent = NULL;
241 int rc; 241 int rc;
242 u32 genctl; 242 u32 genctl, val;
243 struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi }; 243 struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi };
244 int pci_dev_busy = 0; 244 int pci_dev_busy = 0;
245 u8 pmr; 245 u8 pmr;
@@ -285,17 +285,24 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
285 if (ent->device != 0x182) { 285 if (ent->device != 0x182) {
286 if ((pmr & SIS_PMR_COMBINED) == 0) { 286 if ((pmr & SIS_PMR_COMBINED) == 0) {
287 dev_printk(KERN_INFO, &pdev->dev, 287 dev_printk(KERN_INFO, &pdev->dev,
288 "Detected SiS 180/181 chipset in SATA mode\n"); 288 "Detected SiS 180/181/964 chipset in SATA mode\n");
289 port2_start = 64; 289 port2_start = 64;
290 } 290 }
291 else { 291 else {
292 dev_printk(KERN_INFO, &pdev->dev, 292 dev_printk(KERN_INFO, &pdev->dev,
293 "Detected SiS 180/181 chipset in combined mode\n"); 293 "Detected SiS 180/181 chipset in combined mode\n");
294 port2_start=0; 294 port2_start=0;
295 pi.flags |= ATA_FLAG_SLAVE_POSS;
295 } 296 }
296 } 297 }
297 else { 298 else {
298 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n"); 299 pci_read_config_dword ( pdev, 0x6C, &val);
300 if (val & (1L << 31)) {
301 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n");
302 pi.flags |= ATA_FLAG_SLAVE_POSS;
303 }
304 else
305 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n");
299 port2_start = 0x20; 306 port2_start = 0x20;
300 } 307 }
301 308
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4bef76a2f3f2..1f745f12f94e 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -5,6 +5,7 @@
5#include <linux/sysdev.h> 5#include <linux/sysdev.h>
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/sched.h>
8#include <linux/cpu.h> 9#include <linux/cpu.h>
9#include <linux/topology.h> 10#include <linux/topology.h>
10#include <linux/device.h> 11#include <linux/device.h>
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ec5a1b90a0a2..e19ba4ebcd4e 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -759,6 +759,8 @@ static struct vio_driver viodasd_driver = {
759 } 759 }
760}; 760};
761 761
762static int need_delete_probe;
763
762/* 764/*
763 * Initialize the whole device driver. Handle module and non-module 765 * Initialize the whole device driver. Handle module and non-module
764 * versions 766 * versions
@@ -773,46 +775,67 @@ static int __init viodasd_init(void)
773 775
774 if (viopath_hostLp == HvLpIndexInvalid) { 776 if (viopath_hostLp == HvLpIndexInvalid) {
775 printk(VIOD_KERN_WARNING "invalid hosting partition\n"); 777 printk(VIOD_KERN_WARNING "invalid hosting partition\n");
776 return -EIO; 778 rc = -EIO;
779 goto early_fail;
777 } 780 }
778 781
779 printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n", 782 printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
780 viopath_hostLp); 783 viopath_hostLp);
781 784
782 /* register the block device */ 785 /* register the block device */
783 if (register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME)) { 786 rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
787 if (rc) {
784 printk(VIOD_KERN_WARNING 788 printk(VIOD_KERN_WARNING
785 "Unable to get major number %d for %s\n", 789 "Unable to get major number %d for %s\n",
786 VIODASD_MAJOR, VIOD_GENHD_NAME); 790 VIODASD_MAJOR, VIOD_GENHD_NAME);
787 return -EIO; 791 goto early_fail;
788 } 792 }
789 /* Actually open the path to the hosting partition */ 793 /* Actually open the path to the hosting partition */
790 if (viopath_open(viopath_hostLp, viomajorsubtype_blockio, 794 rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
791 VIOMAXREQ + 2)) { 795 VIOMAXREQ + 2);
796 if (rc) {
792 printk(VIOD_KERN_WARNING 797 printk(VIOD_KERN_WARNING
793 "error opening path to host partition %d\n", 798 "error opening path to host partition %d\n",
794 viopath_hostLp); 799 viopath_hostLp);
795 unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME); 800 goto unregister_blk;
796 return -EIO;
797 } 801 }
798 802
799 /* Initialize our request handler */ 803 /* Initialize our request handler */
800 vio_setHandler(viomajorsubtype_blockio, handle_block_event); 804 vio_setHandler(viomajorsubtype_blockio, handle_block_event);
801 805
802 rc = vio_register_driver(&viodasd_driver); 806 rc = vio_register_driver(&viodasd_driver);
803 if (rc == 0) 807 if (rc) {
804 driver_create_file(&viodasd_driver.driver, &driver_attr_probe); 808 printk(VIOD_KERN_WARNING "vio_register_driver failed\n");
809 goto unset_handler;
810 }
811
812 /*
813 * If this call fails, it just means that we cannot dynamically
814 * add virtual disks, but the driver will still work fine for
815 * all existing disk, so ignore the failure.
816 */
817 if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
818 need_delete_probe = 1;
819
820 return 0;
821
822unset_handler:
823 vio_clearHandler(viomajorsubtype_blockio);
824 viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
825unregister_blk:
826 unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
827early_fail:
805 return rc; 828 return rc;
806} 829}
807module_init(viodasd_init); 830module_init(viodasd_init);
808 831
809void viodasd_exit(void) 832void __exit viodasd_exit(void)
810{ 833{
811 driver_remove_file(&viodasd_driver.driver, &driver_attr_probe); 834 if (need_delete_probe)
835 driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
812 vio_unregister_driver(&viodasd_driver); 836 vio_unregister_driver(&viodasd_driver);
813 vio_clearHandler(viomajorsubtype_blockio); 837 vio_clearHandler(viomajorsubtype_blockio);
814 unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
815 viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2); 838 viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
839 unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
816} 840}
817
818module_exit(viodasd_exit); 841module_exit(viodasd_exit);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 091a11cd878c..20dc3be5ecfc 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -21,6 +21,7 @@
21#include <linux/fcntl.h> 21#include <linux/fcntl.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/poll.h> 23#include <linux/poll.h>
24#include <linux/mm.h>
24#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
25#include <linux/spinlock.h> 26#include <linux/spinlock.h>
26#include <linux/sysctl.h> 27#include <linux/sysctl.h>
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index ebace201bec6..26a860adcb38 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -36,6 +36,7 @@
36#include <linux/module.h> 36#include <linux/module.h>
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/sched.h>
39#include <linux/init.h> 40#include <linux/init.h>
40#include <linux/miscdevice.h> 41#include <linux/miscdevice.h>
41#include <linux/delay.h> 42#include <linux/delay.h>
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 050ced247f68..bb9a43c6cf3d 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -22,6 +22,7 @@
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/sched.h>
25#include <linux/miscdevice.h> 26#include <linux/miscdevice.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/io.h> 28#include <linux/io.h>
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index e5cb0fdab9b1..b1dc63e4ac7b 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -21,6 +21,7 @@
21 etc voltage & frequency control is not supported! 21 etc voltage & frequency control is not supported!
22*/ 22*/
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/sched.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/jiffies.h> 27#include <linux/jiffies.h>
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index 26be4ea8a38a..e8ef62b83d6b 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/timer.h> 34#include <linux/timer.h>
35#include <linux/dmi.h> 35#include <linux/dmi.h>
36#include <linux/jiffies.h>
36#include <asm/io.h> 37#include <asm/io.h>
37 38
38#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */ 39#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 2af634d7acf4..eb7ab112c050 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -35,7 +35,7 @@
35#include <linux/ide.h> 35#include <linux/ide.h>
36#include <asm/io.h> 36#include <asm/io.h>
37 37
38#ifdef CONFIG_PPC_MULTIPLATFORM 38#ifdef CONFIG_PPC_CHRP
39#include <asm/processor.h> 39#include <asm/processor.h>
40#endif 40#endif
41 41
@@ -442,7 +442,7 @@ static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
442 hwif->speedproc = &via_set_drive; 442 hwif->speedproc = &via_set_drive;
443 443
444 444
445#if defined(CONFIG_PPC_CHRP) && defined(CONFIG_PPC32) 445#ifdef CONFIG_PPC_CHRP
446 if(machine_is(chrp) && _chrp_type == _CHRP_Pegasos) { 446 if(machine_is(chrp) && _chrp_type == _CHRP_Pegasos) {
447 hwif->irq = hwif->channel ? 15 : 14; 447 hwif->irq = hwif->channel ? 15 : 14;
448 } 448 }
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 0606744c3f84..5e122501fd80 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -35,6 +35,7 @@
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/mm.h> 37#include <linux/mm.h>
38#include <linux/highmem.h>
38#include <asm/io.h> 39#include <asm/io.h>
39#include <asm/scatterlist.h> 40#include <asm/scatterlist.h>
40#include <linux/scatterlist.h> 41#include <linux/scatterlist.h>
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 1f5ebe9ee72c..03319ea5aa0c 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -10,6 +10,8 @@
10 */ 10 */
11 11
12#include <linux/proc_fs.h> 12#include <linux/proc_fs.h>
13#include <linux/timer.h>
14#include <linux/jiffies.h>
13 15
14#include "isdn_divert.h" 16#include "isdn_divert.h"
15 17
diff --git a/drivers/leds/ledtrig-ide-disk.c b/drivers/leds/ledtrig-ide-disk.c
index fa651886ab4f..54b155c7026f 100644
--- a/drivers/leds/ledtrig-ide-disk.c
+++ b/drivers/leds/ledtrig-ide-disk.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/jiffies.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/timer.h> 18#include <linux/timer.h>
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 29a8818a32ec..d756bdb01c59 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/jiffies.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/list.h> 18#include <linux/list.h>
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 7f8477d3a661..92ccee85e2a2 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -228,4 +228,11 @@ config ANSLCD
228 tristate "Support for ANS LCD display" 228 tristate "Support for ANS LCD display"
229 depends on ADB_CUDA && PPC_PMAC 229 depends on ADB_CUDA && PPC_PMAC
230 230
231config PMAC_RACKMETER
232 tristate "Support for Apple XServe front panel LEDs"
233 depends on PPC_PMAC
234 help
235	  This driver provides some support to control the front panel
236	  blue LEDs "vu-meter" of the Xserve macs.
237
231endmenu 238endmenu
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index b53d45f87b0b..2dfc3f4eaf42 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_WINDFARM_PM112) += windfarm_pm112.o windfarm_smu_sat.o \
42 windfarm_smu_sensors.o \ 42 windfarm_smu_sensors.o \
43 windfarm_max6690_sensor.o \ 43 windfarm_max6690_sensor.o \
44 windfarm_lm75_sensor.o windfarm_pid.o 44 windfarm_lm75_sensor.o windfarm_pid.o
45obj-$(CONFIG_PMAC_RACKMETER) += rack-meter.o
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
new file mode 100644
index 000000000000..f1b6f563673a
--- /dev/null
+++ b/drivers/macintosh/rack-meter.c
@@ -0,0 +1,612 @@
1/*
2 * RackMac vu-meter driver
3 *
4 * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 *
7 * Released under the term of the GNU GPL v2.
8 *
9 * Support the CPU-meter LEDs of the Xserve G5
10 *
11 * TODO: Implement PWM to do variable intensity and provide userland
12 * interface for fun. Also, the CPU-meter could be made nicer by being
13 * a bit less "immediate" but giving instead a more average load over
14 * time. Patches welcome :-)
15 *
16 */
17#undef DEBUG
18
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/device.h>
22#include <linux/interrupt.h>
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/dma-mapping.h>
26#include <linux/kernel_stat.h>
27
28#include <asm/io.h>
29#include <asm/prom.h>
30#include <asm/machdep.h>
31#include <asm/pmac_feature.h>
32#include <asm/dbdma.h>
33#include <asm/dbdma.h>
34#include <asm/macio.h>
35#include <asm/keylargo.h>
36
37/* Number of samples in a sample buffer */
38#define SAMPLE_COUNT 256
39
40/* CPU meter sampling rate in ms */
41#define CPU_SAMPLING_RATE 250
42
43struct rackmeter_dma {
44 struct dbdma_cmd cmd[4] ____cacheline_aligned;
45 u32 mark ____cacheline_aligned;
46 u32 buf1[SAMPLE_COUNT] ____cacheline_aligned;
47 u32 buf2[SAMPLE_COUNT] ____cacheline_aligned;
48} ____cacheline_aligned;
49
50struct rackmeter_cpu {
51 struct work_struct sniffer;
52 cputime64_t prev_wall;
53 cputime64_t prev_idle;
54 int zero;
55} ____cacheline_aligned;
56
57struct rackmeter {
58 struct macio_dev *mdev;
59 unsigned int irq;
60 struct device_node *i2s;
61 u8 *ubuf;
62 struct dbdma_regs __iomem *dma_regs;
63 void __iomem *i2s_regs;
64 dma_addr_t dma_buf_p;
65 struct rackmeter_dma *dma_buf_v;
66 int stale_irq;
67 struct rackmeter_cpu cpu[2];
68 int paused;
69 struct mutex sem;
70};
71
72/* To be set as a tunable */
73static int rackmeter_ignore_nice;
74
75/* This GPIO is whacked by the OS X driver when initializing */
76#define RACKMETER_MAGIC_GPIO 0x78
77
78/* This is copied from cpufreq_ondemand, maybe we should put it in
79 * a common header somewhere
80 */
81static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
82{
83 cputime64_t retval;
84
85 retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
86 kstat_cpu(cpu).cpustat.iowait);
87
88 if (rackmeter_ignore_nice)
89 retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
90
91 return retval;
92}
93
94static void rackmeter_setup_i2s(struct rackmeter *rm)
95{
96 struct macio_chip *macio = rm->mdev->bus->chip;
97
98 /* First whack magic GPIO */
99 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, RACKMETER_MAGIC_GPIO, 5);
100
101
102 /* Call feature code to enable the sound channel and the proper
103 * clock sources
104 */
105 pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, rm->i2s, 0, 1);
106
107 /* Power i2s and stop i2s clock. We whack MacIO FCRs directly for now.
108 * This is a bit racy, thus we should add new platform functions to
109 * handle that. snd-aoa needs that too
110 */
111 MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_ENABLE);
112 MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT);
113 (void)MACIO_IN32(KEYLARGO_FCR1);
114 udelay(10);
115
116 /* Then setup i2s. For now, we use the same magic value that
117 * the OS X driver seems to use. We might want to play around
118 * with the clock divisors later
119 */
120 out_le32(rm->i2s_regs + 0x10, 0x01fa0000);
121 (void)in_le32(rm->i2s_regs + 0x10);
122 udelay(10);
123
124 /* Fully restart i2s*/
125 MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE |
126 KL1_I2S0_CLK_ENABLE_BIT);
127 (void)MACIO_IN32(KEYLARGO_FCR1);
128 udelay(10);
129}
130
131static void rackmeter_set_default_pattern(struct rackmeter *rm)
132{
133 int i;
134
135 for (i = 0; i < 16; i++) {
136 if (i < 8)
137 rm->ubuf[i] = (i & 1) * 255;
138 else
139 rm->ubuf[i] = ((~i) & 1) * 255;
140 }
141}
142
143static void rackmeter_do_pause(struct rackmeter *rm, int pause)
144{
145 struct rackmeter_dma *rdma = rm->dma_buf_v;
146
147 pr_debug("rackmeter: %s\n", pause ? "paused" : "started");
148
149 rm->paused = pause;
150 if (pause) {
151 DBDMA_DO_STOP(rm->dma_regs);
152 return;
153 }
154	memset(rdma->buf1, 0, SAMPLE_COUNT * sizeof(u32));
155	memset(rdma->buf2, 0, SAMPLE_COUNT * sizeof(u32));
156
157 rm->dma_buf_v->mark = 0;
158
159 mb();
160 out_le32(&rm->dma_regs->cmdptr_hi, 0);
161 out_le32(&rm->dma_regs->cmdptr, rm->dma_buf_p);
162 out_le32(&rm->dma_regs->control, (RUN << 16) | RUN);
163}
164
165static void rackmeter_setup_dbdma(struct rackmeter *rm)
166{
167 struct rackmeter_dma *db = rm->dma_buf_v;
168 struct dbdma_cmd *cmd = db->cmd;
169
170 /* Make sure dbdma is reset */
171 DBDMA_DO_RESET(rm->dma_regs);
172
173 pr_debug("rackmeter: mark offset=0x%lx\n",
174 offsetof(struct rackmeter_dma, mark));
175 pr_debug("rackmeter: buf1 offset=0x%lx\n",
176 offsetof(struct rackmeter_dma, buf1));
177 pr_debug("rackmeter: buf2 offset=0x%lx\n",
178 offsetof(struct rackmeter_dma, buf2));
179
180 /* Prepare 4 dbdma commands for the 2 buffers */
181 memset(cmd, 0, 4 * sizeof(struct dbdma_cmd));
182 st_le16(&cmd->req_count, 4);
183 st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
184 st_le32(&cmd->phy_addr, rm->dma_buf_p +
185 offsetof(struct rackmeter_dma, mark));
186 st_le32(&cmd->cmd_dep, 0x02000000);
187 cmd++;
188
189 st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
190 st_le16(&cmd->command, OUTPUT_MORE);
191 st_le32(&cmd->phy_addr, rm->dma_buf_p +
192 offsetof(struct rackmeter_dma, buf1));
193 cmd++;
194
195 st_le16(&cmd->req_count, 4);
196 st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
197 st_le32(&cmd->phy_addr, rm->dma_buf_p +
198 offsetof(struct rackmeter_dma, mark));
199 st_le32(&cmd->cmd_dep, 0x01000000);
200 cmd++;
201
202 st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
203 st_le16(&cmd->command, OUTPUT_MORE | BR_ALWAYS);
204 st_le32(&cmd->phy_addr, rm->dma_buf_p +
205 offsetof(struct rackmeter_dma, buf2));
206 st_le32(&cmd->cmd_dep, rm->dma_buf_p);
207
208 rackmeter_do_pause(rm, 0);
209}
210
211static void rackmeter_do_timer(void *data)
212{
213 struct rackmeter *rm = data;
214 unsigned int cpu = smp_processor_id();
215 struct rackmeter_cpu *rcpu = &rm->cpu[cpu];
216 cputime64_t cur_jiffies, total_idle_ticks;
217 unsigned int total_ticks, idle_ticks;
218 int i, offset, load, cumm, pause;
219
220 cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
221 total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
222 rcpu->prev_wall);
223 rcpu->prev_wall = cur_jiffies;
224
225 total_idle_ticks = get_cpu_idle_time(cpu);
226 idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
227 rcpu->prev_idle);
228 rcpu->prev_idle = total_idle_ticks;
229
230 /* We do a very dumb calculation to update the LEDs for now,
231 * we'll do better once we have actual PWM implemented
232 */
233 load = (9 * (total_ticks - idle_ticks)) / total_ticks;
234
235 offset = cpu << 3;
236 cumm = 0;
237 for (i = 0; i < 8; i++) {
238 u8 ub = (load > i) ? 0xff : 0;
239 rm->ubuf[i + offset] = ub;
240 cumm |= ub;
241 }
242 rcpu->zero = (cumm == 0);
243
244 /* Now check if LEDs are all 0, we can stop DMA */
245 pause = (rm->cpu[0].zero && rm->cpu[1].zero);
246 if (pause != rm->paused) {
247 mutex_lock(&rm->sem);
248 pause = (rm->cpu[0].zero && rm->cpu[1].zero);
249 rackmeter_do_pause(rm, pause);
250 mutex_unlock(&rm->sem);
251 }
252 schedule_delayed_work_on(cpu, &rcpu->sniffer,
253 msecs_to_jiffies(CPU_SAMPLING_RATE));
254}
255
256static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
257{
258 unsigned int cpu;
259
260 /* This driver works only with 1 or 2 CPUs numbered 0 and 1,
261 * but that's really all we have on Apple Xserve. It doesn't
262	 * play very nicely with CPU hotplug either, but we don't do that
263 * on those machines yet
264 */
265
266 INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm);
267 INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm);
268
269 for_each_online_cpu(cpu) {
270 struct rackmeter_cpu *rcpu;
271
272 if (cpu > 1)
273 continue;
274		rcpu = &rm->cpu[cpu];
275 rcpu->prev_idle = get_cpu_idle_time(cpu);
276 rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
277 schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
278 msecs_to_jiffies(CPU_SAMPLING_RATE));
279 }
280}
281
282static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
283{
284 cancel_rearming_delayed_work(&rm->cpu[0].sniffer);
285 cancel_rearming_delayed_work(&rm->cpu[1].sniffer);
286}
287
288static int rackmeter_setup(struct rackmeter *rm)
289{
290 pr_debug("rackmeter: setting up i2s..\n");
291 rackmeter_setup_i2s(rm);
292
293 pr_debug("rackmeter: setting up default pattern..\n");
294 rackmeter_set_default_pattern(rm);
295
296 pr_debug("rackmeter: setting up dbdma..\n");
297 rackmeter_setup_dbdma(rm);
298
299 pr_debug("rackmeter: start CPU measurements..\n");
300 rackmeter_init_cpu_sniffer(rm);
301
302 printk(KERN_INFO "RackMeter initialized\n");
303
304 return 0;
305}
306
307/* XXX FIXME: No PWM yet, this is 0/1 */
308static u32 rackmeter_calc_sample(struct rackmeter *rm, unsigned int index)
309{
310 int led;
311 u32 sample = 0;
312
313 for (led = 0; led < 16; led++) {
314 sample >>= 1;
315 sample |= ((rm->ubuf[led] >= 0x80) << 15);
316 }
317 return (sample << 17) | (sample >> 15);
318}
319
320static irqreturn_t rackmeter_irq(int irq, void *arg)
321{
322 struct rackmeter *rm = arg;
323 struct rackmeter_dma *db = rm->dma_buf_v;
324 unsigned int mark, i;
325 u32 *buf;
326
327 /* Flush PCI buffers with an MMIO read. Maybe we could actually
328 * check the status one day ... in case things go wrong, though
329 * this never happened to me
330 */
331 (void)in_le32(&rm->dma_regs->status);
332
333 /* Make sure the CPU gets us in order */
334 rmb();
335
336 /* Read mark */
337 mark = db->mark;
338 if (mark != 1 && mark != 2) {
339 printk(KERN_WARNING "rackmeter: Incorrect DMA mark 0x%08x\n",
340 mark);
341 /* We allow for 3 errors like that (stale DBDMA irqs) */
342 if (++rm->stale_irq > 3) {
343 printk(KERN_ERR "rackmeter: Too many errors,"
344 " stopping DMA\n");
345 DBDMA_DO_RESET(rm->dma_regs);
346 }
347 return IRQ_HANDLED;
348 }
349
350 /* Next buffer we need to fill is mark value */
351 buf = mark == 1 ? db->buf1 : db->buf2;
352
353	/* Fill it now. This routine converts the 8-bit depth sample array
354 * into the PWM bitmap for each LED.
355 */
356 for (i = 0; i < SAMPLE_COUNT; i++)
357 buf[i] = rackmeter_calc_sample(rm, i);
358
359
360 return IRQ_HANDLED;
361}
362
363static int __devinit rackmeter_probe(struct macio_dev* mdev,
364 const struct of_device_id *match)
365{
366 struct device_node *i2s = NULL, *np = NULL;
367 struct rackmeter *rm = NULL;
368 struct resource ri2s, rdma;
369 int rc = -ENODEV;
370
371 pr_debug("rackmeter_probe()\n");
372
373 /* Get i2s-a node */
374 while ((i2s = of_get_next_child(mdev->ofdev.node, i2s)) != NULL)
375 if (strcmp(i2s->name, "i2s-a") == 0)
376 break;
377 if (i2s == NULL) {
378 pr_debug(" i2s-a child not found\n");
379 goto bail;
380 }
381 /* Get lightshow or virtual sound */
382 while ((np = of_get_next_child(i2s, np)) != NULL) {
383 if (strcmp(np->name, "lightshow") == 0)
384 break;
385 if ((strcmp(np->name, "sound") == 0) &&
386 get_property(np, "virtual", NULL) != NULL)
387 break;
388 }
389 if (np == NULL) {
390 pr_debug(" lightshow or sound+virtual child not found\n");
391 goto bail;
392 }
393
394 /* Create and initialize our instance data */
395 rm = kzalloc(sizeof(struct rackmeter), GFP_KERNEL);
396 if (rm == NULL) {
397 printk(KERN_ERR "rackmeter: failed to allocate memory !\n");
398 rc = -ENOMEM;
399 goto bail_release;
400 }
401 rm->mdev = mdev;
402 rm->i2s = i2s;
403 mutex_init(&rm->sem);
404 dev_set_drvdata(&mdev->ofdev.dev, rm);
405 /* Check resources availability. We need at least resource 0 and 1 */
406#if 0 /* Use that when i2s-a is finally an mdev per-se */
407 if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) {
408 printk(KERN_ERR
409 "rackmeter: found match but lacks resources: %s"
410 " (%d resources, %d interrupts)\n",
411		       mdev->ofdev.node->full_name, macio_resource_count(mdev), macio_irq_count(mdev));
412 rc = -ENXIO;
413 goto bail_free;
414 }
415 if (macio_request_resources(mdev, "rackmeter")) {
416 printk(KERN_ERR
417 "rackmeter: failed to request resources: %s\n",
418 mdev->ofdev.node->full_name);
419 rc = -EBUSY;
420 goto bail_free;
421 }
422 rm->irq = macio_irq(mdev, 1);
423#else
424 rm->irq = irq_of_parse_and_map(i2s, 1);
425 if (rm->irq == NO_IRQ ||
426 of_address_to_resource(i2s, 0, &ri2s) ||
427 of_address_to_resource(i2s, 1, &rdma)) {
428 printk(KERN_ERR
429		       "rackmeter: found match but lacks resources: %s\n",
430 mdev->ofdev.node->full_name);
431 rc = -ENXIO;
432 goto bail_free;
433 }
434#endif
435
436 pr_debug(" i2s @0x%08x\n", (unsigned int)ri2s.start);
437 pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start);
438 pr_debug(" irq %d\n", rm->irq);
439
440 rm->ubuf = (u8 *)__get_free_page(GFP_KERNEL);
441 if (rm->ubuf == NULL) {
442 printk(KERN_ERR
443 "rackmeter: failed to allocate samples page !\n");
444 rc = -ENOMEM;
445 goto bail_release;
446 }
447
448 rm->dma_buf_v = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
449 sizeof(struct rackmeter_dma),
450 &rm->dma_buf_p, GFP_KERNEL);
451 if (rm->dma_buf_v == NULL) {
452 printk(KERN_ERR
453 "rackmeter: failed to allocate dma buffer !\n");
454 rc = -ENOMEM;
455 goto bail_free_samples;
456 }
457#if 0
458 rm->i2s_regs = ioremap(macio_resource_start(mdev, 0), 0x1000);
459#else
460 rm->i2s_regs = ioremap(ri2s.start, 0x1000);
461#endif
462 if (rm->i2s_regs == NULL) {
463 printk(KERN_ERR
464 "rackmeter: failed to map i2s registers !\n");
465 rc = -ENXIO;
466 goto bail_free_dma;
467 }
468#if 0
469 rm->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x100);
470#else
471 rm->dma_regs = ioremap(rdma.start, 0x100);
472#endif
473 if (rm->dma_regs == NULL) {
474 printk(KERN_ERR
475 "rackmeter: failed to map dma registers !\n");
476 rc = -ENXIO;
477 goto bail_unmap_i2s;
478 }
479
480 rc = rackmeter_setup(rm);
481 if (rc) {
482 printk(KERN_ERR
483 "rackmeter: failed to initialize !\n");
484 rc = -ENXIO;
485 goto bail_unmap_dma;
486 }
487
488 rc = request_irq(rm->irq, rackmeter_irq, 0, "rackmeter", rm);
489 if (rc != 0) {
490 printk(KERN_ERR
491 "rackmeter: failed to request interrupt !\n");
492 goto bail_stop_dma;
493 }
494 of_node_put(np);
495 return 0;
496
497 bail_stop_dma:
498 DBDMA_DO_RESET(rm->dma_regs);
499 bail_unmap_dma:
500 iounmap(rm->dma_regs);
501 bail_unmap_i2s:
502 iounmap(rm->i2s_regs);
503 bail_free_dma:
504 dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
505 sizeof(struct rackmeter_dma),
506 rm->dma_buf_v, rm->dma_buf_p);
507 bail_free_samples:
508 free_page((unsigned long)rm->ubuf);
509 bail_release:
510#if 0
511 macio_release_resources(mdev);
512#endif
513 bail_free:
514 kfree(rm);
515 bail:
516 of_node_put(i2s);
517 of_node_put(np);
518 dev_set_drvdata(&mdev->ofdev.dev, NULL);
519 return rc;
520}
521
522static int __devexit rackmeter_remove(struct macio_dev* mdev)
523{
524 struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
525
526 /* Stop CPU sniffer timer & work queues */
527 rackmeter_stop_cpu_sniffer(rm);
528
529 /* Clear reference to private data */
530 dev_set_drvdata(&mdev->ofdev.dev, NULL);
531
532 /* Stop/reset dbdma */
533 DBDMA_DO_RESET(rm->dma_regs);
534
535 /* Release the IRQ */
536 free_irq(rm->irq, rm);
537
538 /* Unmap registers */
539 iounmap(rm->dma_regs);
540 iounmap(rm->i2s_regs);
541
542 /* Free DMA */
543 dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
544 sizeof(struct rackmeter_dma),
545 rm->dma_buf_v, rm->dma_buf_p);
546
547 /* Free samples */
548 free_page((unsigned long)rm->ubuf);
549
550#if 0
551 /* Release resources */
552 macio_release_resources(mdev);
553#endif
554
555 /* Get rid of me */
556 kfree(rm);
557
558 return 0;
559}
560
561static int rackmeter_shutdown(struct macio_dev* mdev)
562{
563 struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
564
565 if (rm == NULL)
566 return -ENODEV;
567
568 /* Stop CPU sniffer timer & work queues */
569 rackmeter_stop_cpu_sniffer(rm);
570
571 /* Stop/reset dbdma */
572 DBDMA_DO_RESET(rm->dma_regs);
573
574 return 0;
575}
576
577static struct of_device_id rackmeter_match[] = {
578 { .name = "i2s" },
579 { }
580};
581
582static struct macio_driver rackmeter_drv = {
583 .name = "rackmeter",
584 .owner = THIS_MODULE,
585 .match_table = rackmeter_match,
586 .probe = rackmeter_probe,
587 .remove = rackmeter_remove,
588 .shutdown = rackmeter_shutdown,
589};
590
591
592static int __init rackmeter_init(void)
593{
594 pr_debug("rackmeter_init()\n");
595
596 return macio_register_driver(&rackmeter_drv);
597}
598
599static void __exit rackmeter_exit(void)
600{
601 pr_debug("rackmeter_exit()\n");
602
603 macio_unregister_driver(&rackmeter_drv);
604}
605
606module_init(rackmeter_init);
607module_exit(rackmeter_exit);
608
609
610MODULE_LICENSE("GPL");
611MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
612MODULE_DESCRIPTION("RackMeter: Support vu-meter on XServe front panel");
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 4871158aca3e..6dde27ab79a8 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -46,6 +46,7 @@
46#include <asm/abs_addr.h> 46#include <asm/abs_addr.h>
47#include <asm/uaccess.h> 47#include <asm/uaccess.h>
48#include <asm/of_device.h> 48#include <asm/of_device.h>
49#include <asm/of_platform.h>
49 50
50#define VERSION "0.7" 51#define VERSION "0.7"
51#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp." 52#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
@@ -653,7 +654,7 @@ static int __init smu_init_sysfs(void)
653 * I'm a bit too far from figuring out how that works with those 654 * I'm a bit too far from figuring out how that works with those
654 * new chipsets, but that will come back and bite us 655 * new chipsets, but that will come back and bite us
655 */ 656 */
656 of_register_driver(&smu_of_platform_driver); 657 of_register_platform_driver(&smu_of_platform_driver);
657 return 0; 658 return 0;
658} 659}
659 660
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index a0f30d0853ea..13b953ae8ebc 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -30,7 +30,7 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/system.h> 31#include <asm/system.h>
32#include <asm/sections.h> 32#include <asm/sections.h>
33#include <asm/of_device.h> 33#include <asm/of_platform.h>
34 34
35#undef DEBUG 35#undef DEBUG
36 36
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index d00c0c37a12e..2e4ad44a8636 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -129,6 +129,7 @@
129#include <asm/sections.h> 129#include <asm/sections.h>
130#include <asm/of_device.h> 130#include <asm/of_device.h>
131#include <asm/macio.h> 131#include <asm/macio.h>
132#include <asm/of_platform.h>
132 133
133#include "therm_pm72.h" 134#include "therm_pm72.h"
134 135
@@ -2236,14 +2237,14 @@ static int __init therm_pm72_init(void)
2236 return -ENODEV; 2237 return -ENODEV;
2237 } 2238 }
2238 2239
2239 of_register_driver(&fcu_of_platform_driver); 2240 of_register_platform_driver(&fcu_of_platform_driver);
2240 2241
2241 return 0; 2242 return 0;
2242} 2243}
2243 2244
2244static void __exit therm_pm72_exit(void) 2245static void __exit therm_pm72_exit(void)
2245{ 2246{
2246 of_unregister_driver(&fcu_of_platform_driver); 2247 of_unregister_platform_driver(&fcu_of_platform_driver);
2247 2248
2248 if (of_dev) 2249 if (of_dev)
2249 of_device_unregister(of_dev); 2250 of_device_unregister(of_dev);
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 738faab1b22c..a1d3a987cb3a 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -36,12 +36,13 @@
36#include <linux/i2c.h> 36#include <linux/i2c.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/init.h> 38#include <linux/init.h>
39
39#include <asm/prom.h> 40#include <asm/prom.h>
40#include <asm/machdep.h> 41#include <asm/machdep.h>
41#include <asm/io.h> 42#include <asm/io.h>
42#include <asm/system.h> 43#include <asm/system.h>
43#include <asm/sections.h> 44#include <asm/sections.h>
44#include <asm/of_device.h> 45#include <asm/of_platform.h>
45#include <asm/macio.h> 46#include <asm/macio.h>
46 47
47#define LOG_TEMP 0 /* continously log temperature */ 48#define LOG_TEMP 0 /* continously log temperature */
@@ -511,14 +512,14 @@ g4fan_init( void )
511 return -ENODEV; 512 return -ENODEV;
512 } 513 }
513 514
514 of_register_driver( &therm_of_driver ); 515 of_register_platform_driver( &therm_of_driver );
515 return 0; 516 return 0;
516} 517}
517 518
518static void __exit 519static void __exit
519g4fan_exit( void ) 520g4fan_exit( void )
520{ 521{
521 of_unregister_driver( &therm_of_driver ); 522 of_unregister_platform_driver( &therm_of_driver );
522 523
523 if( x.of_dev ) 524 if( x.of_dev )
524 of_device_unregister( x.of_dev ); 525 of_device_unregister( x.of_dev );
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index 8458ff3f351e..206c13e47a06 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -30,6 +30,7 @@
30#include <linux/input.h> 30#include <linux/input.h>
31#include <linux/dvb/frontend.h> 31#include <linux/dvb/frontend.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33#include <linux/mm.h>
33 34
34#include "dmxdev.h" 35#include "dmxdev.h"
35#include "dvb_demux.h" 36#include "dvb_demux.h"
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 72ef7bde3346..f143e13b229d 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -26,6 +26,7 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#include <linux/mm.h>
29#include "ehea.h" 30#include "ehea.h"
30#include "ehea_phyp.h" 31#include "ehea_phyp.h"
31#include "ehea_qmr.h" 32#include "ehea_qmr.h"
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index f73f10a0a562..407d2acbf7c7 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -24,6 +24,7 @@
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25 25
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/dcr.h>
27 28
28/* 29/*
29 * These MAL "versions" probably aren't the real versions IBM uses for these 30 * These MAL "versions" probably aren't the real versions IBM uses for these
@@ -191,6 +192,7 @@ struct mal_commac {
191 192
192struct ibm_ocp_mal { 193struct ibm_ocp_mal {
193 int dcrbase; 194 int dcrbase;
195 dcr_host_t dcrhost;
194 196
195 struct list_head poll_list; 197 struct list_head poll_list;
196 struct net_device poll_dev; 198 struct net_device poll_dev;
@@ -207,12 +209,12 @@ struct ibm_ocp_mal {
207 209
208static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg) 210static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg)
209{ 211{
210 return mfdcr(mal->dcrbase + reg); 212 return dcr_read(mal->dcrhost, mal->dcrbase + reg);
211} 213}
212 214
213static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val) 215static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val)
214{ 216{
215 mtdcr(mal->dcrbase + reg, val); 217 dcr_write(mal->dcrhost, mal->dcrbase + reg, val);
216} 218}
217 219
218/* Register MAL devices */ 220/* Register MAL devices */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 44c9f993dcc4..99343b5836b8 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -50,7 +50,6 @@
50#include <asm/semaphore.h> 50#include <asm/semaphore.h>
51#include <asm/hvcall.h> 51#include <asm/hvcall.h>
52#include <asm/atomic.h> 52#include <asm/atomic.h>
53#include <asm/iommu.h>
54#include <asm/vio.h> 53#include <asm/vio.h>
55#include <asm/uaccess.h> 54#include <asm/uaccess.h>
56#include <linux/seq_file.h> 55#include <linux/seq_file.h>
@@ -1000,8 +999,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1000 adapter->mac_addr = 0; 999 adapter->mac_addr = 0;
1001 memcpy(&adapter->mac_addr, mac_addr_p, 6); 1000 memcpy(&adapter->mac_addr, mac_addr_p, 6);
1002 1001
1003 adapter->liobn = dev->iommu_table->it_index;
1004
1005 netdev->irq = dev->irq; 1002 netdev->irq = dev->irq;
1006 netdev->open = ibmveth_open; 1003 netdev->open = ibmveth_open;
1007 netdev->poll = ibmveth_poll; 1004 netdev->poll = ibmveth_poll;
@@ -1115,7 +1112,6 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
1115 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version); 1112 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
1116 1113
1117 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address); 1114 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
1118 seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
1119 seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", 1115 seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
1120 current_mac[0], current_mac[1], current_mac[2], 1116 current_mac[0], current_mac[1], current_mac[2],
1121 current_mac[3], current_mac[4], current_mac[5]); 1117 current_mac[3], current_mac[4], current_mac[5]);
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index f5b25bff1540..bb69ccae8ace 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -118,7 +118,6 @@ struct ibmveth_adapter {
118 struct net_device_stats stats; 118 struct net_device_stats stats;
119 unsigned int mcastFilterSize; 119 unsigned int mcastFilterSize;
120 unsigned long mac_addr; 120 unsigned long mac_addr;
121 unsigned long liobn;
122 void * buffer_list_addr; 121 void * buffer_list_addr;
123 void * filter_list_addr; 122 void * filter_list_addr;
124 dma_addr_t buffer_list_dma; 123 dma_addr_t buffer_list_dma;
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 6efbd499d752..4256c13c73c2 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -57,6 +57,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c
57#include <linux/netdevice.h> 57#include <linux/netdevice.h>
58#include <linux/etherdevice.h> 58#include <linux/etherdevice.h>
59#include <linux/skbuff.h> 59#include <linux/skbuff.h>
60#include <linux/mm.h>
60#include <linux/bitops.h> 61#include <linux/bitops.h>
61 62
62#include <asm/io.h> 63#include <asm/io.h>
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index d66328975425..1a6fed76d4cc 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -36,6 +36,7 @@
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/netdevice.h> 37#include <linux/netdevice.h>
38#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
39#include <linux/mm.h>
39 40
40#include <asm/io.h> 41#include <asm/io.h>
41#include <asm/system.h> 42#include <asm/system.h>
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index f14e99276dba..096d4a100bf2 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -254,7 +254,7 @@ static int fixed_mdio_register_device(int number, int speed, int duplex)
254 goto device_create_fail; 254 goto device_create_fail;
255 } 255 }
256 256
257 phydev->irq = -1; 257 phydev->irq = PHY_IGNORE_INTERRUPT;
258 phydev->dev.bus = &mdio_bus_type; 258 phydev->dev.bus = &mdio_bus_type;
259 259
260 if(number) 260 if(number)
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 12cbfd190dd7..92d11b961db8 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -114,6 +114,7 @@
114#include <linux/dma-mapping.h> 114#include <linux/dma-mapping.h>
115#include <linux/ip.h> 115#include <linux/ip.h>
116#include <linux/mii.h> 116#include <linux/mii.h>
117#include <linux/mm.h>
117 118
118#include "h/skdrv1st.h" 119#include "h/skdrv1st.h"
119#include "h/skdrv2nd.h" 120#include "h/skdrv2nd.h"
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index f16f696c1ff2..ebb6aa39f9c7 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -88,12 +88,11 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
88static inline u32 88static inline u32
89spider_net_read_reg(struct spider_net_card *card, u32 reg) 89spider_net_read_reg(struct spider_net_card *card, u32 reg)
90{ 90{
91 u32 value; 91 /* We use the powerpc specific variants instead of readl_be() because
92 92 * we know spidernet is not a real PCI device and we can thus avoid the
93 value = readl(card->regs + reg); 93 * performance hit caused by the PCI workarounds.
94 value = le32_to_cpu(value); 94 */
95 95 return in_be32(card->regs + reg);
96 return value;
97} 96}
98 97
99/** 98/**
@@ -105,8 +104,11 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
105static inline void 104static inline void
106spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) 105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
107{ 106{
108 value = cpu_to_le32(value); 107 /* We use the powerpc specific variants instead of writel_be() because
109 writel(value, card->regs + reg); 108 * we know spidernet is not a real PCI device and we can thus avoid the
109 * performance hit caused by the PCI workarounds.
110 */
111 out_be32(card->regs + reg, value);
110} 112}
111 113
112/** spider_net_write_phy - write to phy register 114/** spider_net_write_phy - write to phy register
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 7a0aee6c869d..bf873ea25797 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -41,6 +41,7 @@
41#include <linux/ethtool.h> 41#include <linux/ethtool.h>
42#include <linux/mii.h> 42#include <linux/mii.h>
43#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <linux/mm.h>
44#include <asm/processor.h> /* Processor type for cache alignment. */ 45#include <asm/processor.h> /* Processor type for cache alignment. */
45#include <asm/uaccess.h> 46#include <asm/uaccess.h>
46#include <asm/io.h> 47#include <asm/io.h>
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index b865db363ba0..47a1c09d19ac 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -38,6 +38,7 @@ static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.ne
38#include <linux/skbuff.h> 38#include <linux/skbuff.h>
39#include <linux/bitops.h> 39#include <linux/bitops.h>
40 40
41#include <asm/cacheflush.h>
41#include <asm/setup.h> 42#include <asm/setup.h>
42#include <asm/irq.h> 43#include <asm/irq.h>
43#include <asm/io.h> 44#include <asm/io.h>
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index d03a9a849c06..785e4a535f9e 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -56,6 +56,7 @@
56#include <linux/if_vlan.h> 56#include <linux/if_vlan.h>
57#include <linux/bitops.h> 57#include <linux/bitops.h>
58#include <linux/mutex.h> 58#include <linux/mutex.h>
59#include <linux/mm.h>
59 60
60#include <asm/system.h> 61#include <asm/system.h>
61#include <asm/io.h> 62#include <asm/io.h>
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index ec432ea879fb..ef671739cfea 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -32,6 +32,7 @@
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/mm.h>
35#include <linux/bitops.h> 36#include <linux/bitops.h>
36 37
37#include <asm/system.h> 38#include <asm/system.h>
@@ -3012,6 +3013,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3012#endif 3013#endif
3013 3014
3014 err = -ENODEV; 3015 err = -ENODEV;
3016
3017 if (pci_enable_device(pdev))
3018 goto err_out;
3019 pci_set_master(pdev);
3020
3015 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) { 3021 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
3016 qp = quattro_pci_find(pdev); 3022 qp = quattro_pci_find(pdev);
3017 if (qp == NULL) 3023 if (qp == NULL)
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index bfe59865b1dd..0d97e10ccac5 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -1826,7 +1826,7 @@ static void tr_rx(struct net_device *dev)
1826 skb->protocol = tr_type_trans(skb, dev); 1826 skb->protocol = tr_type_trans(skb, dev);
1827 if (IPv4_p) { 1827 if (IPv4_p) {
1828 skb->csum = chksum; 1828 skb->csum = chksum;
1829 skb->ip_summed = 1; 1829 skb->ip_summed = CHECKSUM_COMPLETE;
1830 } 1830 }
1831 netif_rx(skb); 1831 netif_rx(skb);
1832 dev->last_rx = jiffies; 1832 dev->last_rx = jiffies;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 3f4b6408b755..4b3cd3d8b62a 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -473,9 +473,9 @@
473#include <asm/byteorder.h> 473#include <asm/byteorder.h>
474#include <asm/unaligned.h> 474#include <asm/unaligned.h>
475#include <asm/uaccess.h> 475#include <asm/uaccess.h>
476#ifdef CONFIG_PPC_MULTIPLATFORM 476#ifdef CONFIG_PPC_PMAC
477#include <asm/machdep.h> 477#include <asm/machdep.h>
478#endif /* CONFIG_PPC_MULTIPLATFORM */ 478#endif /* CONFIG_PPC_PMAC */
479 479
480#include "de4x5.h" 480#include "de4x5.h"
481 481
@@ -4151,7 +4151,7 @@ get_hw_addr(struct net_device *dev)
4151 /* If possible, try to fix a broken card - SMC only so far */ 4151 /* If possible, try to fix a broken card - SMC only so far */
4152 srom_repair(dev, broken); 4152 srom_repair(dev, broken);
4153 4153
4154#ifdef CONFIG_PPC_MULTIPLATFORM 4154#ifdef CONFIG_PPC_PMAC
4155 /* 4155 /*
4156 ** If the address starts with 00 a0, we have to bit-reverse 4156 ** If the address starts with 00 a0, we have to bit-reverse
4157 ** each byte of the address. 4157 ** each byte of the address.
@@ -4168,7 +4168,7 @@ get_hw_addr(struct net_device *dev)
4168 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1); 4168 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
4169 } 4169 }
4170 } 4170 }
4171#endif /* CONFIG_PPC_MULTIPLATFORM */ 4171#endif /* CONFIG_PPC_PMAC */
4172 4172
4173 /* Test for a bad enet address */ 4173 /* Test for a bad enet address */
4174 status = test_bad_enet(dev, status); 4174 status = test_bad_enet(dev, status);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 8ddea1da7c05..9781b16bb8b6 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -117,6 +117,7 @@ static const int multicast_filter_limit = 32;
117#include <linux/netdevice.h> 117#include <linux/netdevice.h>
118#include <linux/etherdevice.h> 118#include <linux/etherdevice.h>
119#include <linux/skbuff.h> 119#include <linux/skbuff.h>
120#include <linux/mm.h>
120#include <linux/init.h> 121#include <linux/init.h>
121#include <linux/delay.h> 122#include <linux/delay.h>
122#include <linux/ethtool.h> 123#include <linux/ethtool.h>
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 73a58c73d526..fc405f0165d9 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -1,5 +1,6 @@
1#include <linux/pci.h> 1#include <linux/pci.h>
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/sched.h>
3#include <linux/ioport.h> 4#include <linux/ioport.h>
4#include <linux/wait.h> 5#include <linux/wait.h>
5 6
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
new file mode 100644
index 000000000000..b52d547b7a78
--- /dev/null
+++ b/drivers/ps3/Makefile
@@ -0,0 +1 @@
obj-y += system-bus.o
diff --git a/drivers/ps3/system-bus.c b/drivers/ps3/system-bus.c
new file mode 100644
index 000000000000..d79f949bcb2a
--- /dev/null
+++ b/drivers/ps3/system-bus.c
@@ -0,0 +1,362 @@
1/*
2 * PS3 system bus driver.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/dma-mapping.h>
25#include <linux/err.h>
26
27#include <asm/udbg.h>
28#include <asm/ps3.h>
29#include <asm/lv1call.h>
30#include <asm/firmware.h>
31
32#define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__)
33static void _dump_mmio_region(const struct ps3_mmio_region* r,
34 const char* func, int line)
35{
36 pr_debug("%s:%d: dev %u:%u\n", func, line, r->did.bus_id,
37 r->did.dev_id);
38 pr_debug("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
39 pr_debug("%s:%d: len %lxh\n", func, line, r->len);
40 pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr);
41}
42
43int ps3_mmio_region_create(struct ps3_mmio_region *r)
44{
45 int result;
46
47 result = lv1_map_device_mmio_region(r->did.bus_id, r->did.dev_id,
48 r->bus_addr, r->len, r->page_size, &r->lpar_addr);
49
50 if (result) {
51 pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n",
52 __func__, __LINE__, ps3_result(result));
53 r->lpar_addr = r->len = r->bus_addr = 0;
54 }
55
56 dump_mmio_region(r);
57 return result;
58}
59
60int ps3_free_mmio_region(struct ps3_mmio_region *r)
61{
62 int result;
63
64 result = lv1_unmap_device_mmio_region(r->did.bus_id, r->did.dev_id,
65 r->bus_addr);
66
67 if (result)
68 pr_debug("%s:%d: lv1_unmap_device_mmio_region failed: %s\n",
69 __func__, __LINE__, ps3_result(result));
70
71 r->lpar_addr = r->len = r->bus_addr = 0;
72 return result;
73}
74
75static int ps3_system_bus_match(struct device *_dev,
76 struct device_driver *_drv)
77{
78 int result;
79 struct ps3_system_bus_driver *drv = to_ps3_system_bus_driver(_drv);
80 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
81
82 result = dev->match_id == drv->match_id;
83
84 pr_info("%s:%d: dev=%u(%s), drv=%u(%s): %s\n", __func__, __LINE__,
85 dev->match_id, dev->core.bus_id, drv->match_id, drv->core.name,
86 (result ? "match" : "miss"));
87 return result;
88}
89
90static int ps3_system_bus_probe(struct device *_dev)
91{
92 int result;
93 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
94 struct ps3_system_bus_driver *drv =
95 to_ps3_system_bus_driver(_dev->driver);
96
97 result = lv1_open_device(dev->did.bus_id, dev->did.dev_id, 0);
98
99 if (result) {
100 pr_debug("%s:%d: lv1_open_device failed (%d)\n",
101 __func__, __LINE__, result);
102 result = -EACCES;
103 goto clean_none;
104 }
105
106 if (dev->d_region->did.bus_id) {
107 result = ps3_dma_region_create(dev->d_region);
108
109 if (result) {
110 pr_debug("%s:%d: ps3_dma_region_create failed (%d)\n",
111 __func__, __LINE__, result);
112 BUG_ON("check region type");
113 result = -EINVAL;
114 goto clean_device;
115 }
116 }
117
118 BUG_ON(!drv);
119
120 if (drv->probe)
121 result = drv->probe(dev);
122 else
123 pr_info("%s:%d: %s no probe method\n", __func__, __LINE__,
124 dev->core.bus_id);
125
126 if (result) {
127 pr_debug("%s:%d: drv->probe failed\n", __func__, __LINE__);
128 goto clean_dma;
129 }
130
131 return result;
132
133clean_dma:
134 ps3_dma_region_free(dev->d_region);
135clean_device:
136 lv1_close_device(dev->did.bus_id, dev->did.dev_id);
137clean_none:
138 return result;
139}
140
141static int ps3_system_bus_remove(struct device *_dev)
142{
143 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
144 struct ps3_system_bus_driver *drv =
145 to_ps3_system_bus_driver(_dev->driver);
146
147 if (drv->remove)
148 drv->remove(dev);
149 else
150 pr_info("%s:%d: %s no remove method\n", __func__, __LINE__,
151 dev->core.bus_id);
152
153 ps3_dma_region_free(dev->d_region);
154 ps3_free_mmio_region(dev->m_region);
155 lv1_close_device(dev->did.bus_id, dev->did.dev_id);
156
157 return 0;
158}
159
160struct bus_type ps3_system_bus_type = {
161 .name = "ps3_system_bus",
162 .match = ps3_system_bus_match,
163 .probe = ps3_system_bus_probe,
164 .remove = ps3_system_bus_remove,
165};
166
167int __init ps3_system_bus_init(void)
168{
169 int result;
170
171 if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
172 return 0;
173
174 result = bus_register(&ps3_system_bus_type);
175 BUG_ON(result);
176 return result;
177}
178
179core_initcall(ps3_system_bus_init);
180
181/* Allocates a contiguous real buffer and creates mappings over it.
182 * Returns the virtual address of the buffer and sets dma_handle
183 * to the dma address (mapping) of the first page.
184 */
185
186static void * ps3_alloc_coherent(struct device *_dev, size_t size,
187 dma_addr_t *dma_handle, gfp_t flag)
188{
189 int result;
190 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
191 unsigned long virt_addr;
192
193 BUG_ON(!dev->d_region->bus_addr);
194
195 flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
196 flag |= __GFP_ZERO;
197
198 virt_addr = __get_free_pages(flag, get_order(size));
199
200 if (!virt_addr) {
201 pr_debug("%s:%d: get_free_pages failed\n", __func__, __LINE__);
202 goto clean_none;
203 }
204
205 result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle);
206
207 if (result) {
208 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
209 __func__, __LINE__, result);
210 BUG_ON("check region type");
211 goto clean_alloc;
212 }
213
214 return (void*)virt_addr;
215
216clean_alloc:
217 free_pages(virt_addr, get_order(size));
218clean_none:
219	*dma_handle = 0;
220 return NULL;
221}
222
223static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
224 dma_addr_t dma_handle)
225{
226 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
227
228 ps3_dma_unmap(dev->d_region, dma_handle, size);
229 free_pages((unsigned long)vaddr, get_order(size));
230}
231
232/* Creates TCEs for a user provided buffer. The user buffer must be
233 * contiguous real kernel storage (not vmalloc). The address of the buffer
234 * passed here is the kernel (virtual) address of the buffer. The buffer
235 * need not be page aligned, the dma_addr_t returned will point to the same
236 * byte within the page as vaddr.
237 */
238
239static dma_addr_t ps3_map_single(struct device *_dev, void *ptr, size_t size,
240 enum dma_data_direction direction)
241{
242 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
243 int result;
244 unsigned long bus_addr;
245
246 result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
247 &bus_addr);
248
249 if (result) {
250 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
251 __func__, __LINE__, result);
252 }
253
254 return bus_addr;
255}
256
257static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
258 size_t size, enum dma_data_direction direction)
259{
260 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
261 int result;
262
263 result = ps3_dma_unmap(dev->d_region, dma_addr, size);
264
265 if (result) {
266 pr_debug("%s:%d: ps3_dma_unmap failed (%d)\n",
267 __func__, __LINE__, result);
268 }
269}
270
271static int ps3_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
272 enum dma_data_direction direction)
273{
274#if defined(CONFIG_PS3_DYNAMIC_DMA)
275 BUG_ON("do");
276#endif
277 return 0;
278}
279
280static void ps3_unmap_sg(struct device *_dev, struct scatterlist *sg,
281 int nents, enum dma_data_direction direction)
282{
283#if defined(CONFIG_PS3_DYNAMIC_DMA)
284 BUG_ON("do");
285#endif
286}
287
288static int ps3_dma_supported(struct device *_dev, u64 mask)
289{
290 return 1;
291}
292
293static struct dma_mapping_ops ps3_dma_ops = {
294 .alloc_coherent = ps3_alloc_coherent,
295 .free_coherent = ps3_free_coherent,
296 .map_single = ps3_map_single,
297 .unmap_single = ps3_unmap_single,
298 .map_sg = ps3_map_sg,
299 .unmap_sg = ps3_unmap_sg,
300 .dma_supported = ps3_dma_supported
301};
302
303/**
304 * ps3_system_bus_release_device - remove a device from the system bus
305 */
306
307static void ps3_system_bus_release_device(struct device *_dev)
308{
309 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
310 kfree(dev);
311}
312
313/**
314 * ps3_system_bus_device_register - add a device to the system bus
315 *
316 * ps3_system_bus_device_register() expects the dev object to be allocated
317 * dynamically by the caller. The system bus takes ownership of the dev
318 * object and frees the object in ps3_system_bus_release_device().
319 */
320
321int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
322{
323 int result;
324 static unsigned int dev_count = 1;
325
326 dev->core.parent = NULL;
327 dev->core.bus = &ps3_system_bus_type;
328 dev->core.release = ps3_system_bus_release_device;
329
330 dev->core.archdata.of_node = NULL;
331 dev->core.archdata.dma_ops = &ps3_dma_ops;
332 dev->core.archdata.numa_node = 0;
333
334 snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "sb_%02x",
335 dev_count++);
336
337 pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);
338
339 result = device_register(&dev->core);
340 return result;
341}
342
343EXPORT_SYMBOL_GPL(ps3_system_bus_device_register);
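A hypothetical caller of ps3_system_bus_device_register(), sketched for illustration only (not part of the commit; it assumes <linux/slab.h> for kzalloc() and elides error handling): once registration succeeds the bus owns dev and frees it through ps3_system_bus_release_device(), so the caller must not free it itself.

static int __init example_add_device(void)
{
	struct ps3_system_bus_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* Platform setup code would fill in match_id, did and the
	 * m_region/d_region pointers before registering. */
	return ps3_system_bus_device_register(dev);
}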
344
345int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv)
346{
347 int result;
348
349 drv->core.bus = &ps3_system_bus_type;
350
351 result = driver_register(&drv->core);
352 return result;
353}
354
355EXPORT_SYMBOL_GPL(ps3_system_bus_driver_register);
356
357void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv)
358{
359 driver_unregister(&drv->core);
360}
361
362EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister);
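A matching driver-side sketch (again illustrative, with made-up names and match id): drivers bind to devices purely by match_id, as ps3_system_bus_match() above compares dev->match_id with drv->match_id.

static int example_probe(struct ps3_system_bus_device *dev) { return 0; }
static int example_remove(struct ps3_system_bus_device *dev) { return 0; }

static struct ps3_system_bus_driver example_driver = {
	.match_id	= 42,				/* made-up match id */
	.core		= { .name = "example" },
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_driver_init(void)
{
	return ps3_system_bus_driver_register(&example_driver);
}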
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 79ffef6bfaf8..a2cef57d7bcb 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1264,15 +1264,21 @@ __dasd_check_expire(struct dasd_device * device)
1264 if (list_empty(&device->ccw_queue)) 1264 if (list_empty(&device->ccw_queue))
1265 return; 1265 return;
1266 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1266 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1267 if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) { 1267 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1268 if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) { 1268 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1269 if (device->discipline->term_IO(cqr) != 0) {
1270 /* Hmpf, try again in 5 sec */
1271 dasd_set_timer(device, 5*HZ);
1272 DEV_MESSAGE(KERN_ERR, device,
1273 "internal error - timeout (%is) expired "
1274 "for cqr %p, termination failed, "
1275 "retrying in 5s",
1276 (cqr->expires/HZ), cqr);
1277 } else {
1269 DEV_MESSAGE(KERN_ERR, device, 1278 DEV_MESSAGE(KERN_ERR, device,
1270 "internal error - timeout (%is) expired " 1279 "internal error - timeout (%is) expired "
1271 "for cqr %p (%i retries left)", 1280 "for cqr %p (%i retries left)",
1272 (cqr->expires/HZ), cqr, cqr->retries); 1281 (cqr->expires/HZ), cqr, cqr->retries);
1273 if (device->discipline->term_IO(cqr) != 0)
1274 /* Hmpf, try again in 1/10 sec */
1275 dasd_set_timer(device, 10);
1276 } 1282 }
1277 } 1283 }
1278} 1284}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 91cf971f0652..17fdd8c9f740 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -684,21 +684,26 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
684 const char *buf, size_t count) 684 const char *buf, size_t count)
685{ 685{
686 struct dasd_devmap *devmap; 686 struct dasd_devmap *devmap;
687 int ro_flag; 687 int val;
688 char *endp;
688 689
689 devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); 690 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
690 if (IS_ERR(devmap)) 691 if (IS_ERR(devmap))
691 return PTR_ERR(devmap); 692 return PTR_ERR(devmap);
692 ro_flag = buf[0] == '1'; 693
694 val = simple_strtoul(buf, &endp, 0);
695 if (((endp + 1) < (buf + count)) || (val > 1))
696 return -EINVAL;
697
693 spin_lock(&dasd_devmap_lock); 698 spin_lock(&dasd_devmap_lock);
694 if (ro_flag) 699 if (val)
695 devmap->features |= DASD_FEATURE_READONLY; 700 devmap->features |= DASD_FEATURE_READONLY;
696 else 701 else
697 devmap->features &= ~DASD_FEATURE_READONLY; 702 devmap->features &= ~DASD_FEATURE_READONLY;
698 if (devmap->device) 703 if (devmap->device)
699 devmap->device->features = devmap->features; 704 devmap->device->features = devmap->features;
700 if (devmap->device && devmap->device->gdp) 705 if (devmap->device && devmap->device->gdp)
701 set_disk_ro(devmap->device->gdp, ro_flag); 706 set_disk_ro(devmap->device->gdp, val);
702 spin_unlock(&dasd_devmap_lock); 707 spin_unlock(&dasd_devmap_lock);
703 return count; 708 return count;
704} 709}
@@ -729,17 +734,22 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
729{ 734{
730 struct dasd_devmap *devmap; 735 struct dasd_devmap *devmap;
731 ssize_t rc; 736 ssize_t rc;
732 int use_diag; 737 int val;
738 char *endp;
733 739
734 devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); 740 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
735 if (IS_ERR(devmap)) 741 if (IS_ERR(devmap))
736 return PTR_ERR(devmap); 742 return PTR_ERR(devmap);
737 use_diag = buf[0] == '1'; 743
744 val = simple_strtoul(buf, &endp, 0);
745 if (((endp + 1) < (buf + count)) || (val > 1))
746 return -EINVAL;
747
738 spin_lock(&dasd_devmap_lock); 748 spin_lock(&dasd_devmap_lock);
739 /* Changing diag discipline flag is only allowed in offline state. */ 749 /* Changing diag discipline flag is only allowed in offline state. */
740 rc = count; 750 rc = count;
741 if (!devmap->device) { 751 if (!devmap->device) {
742 if (use_diag) 752 if (val)
743 devmap->features |= DASD_FEATURE_USEDIAG; 753 devmap->features |= DASD_FEATURE_USEDIAG;
744 else 754 else
745 devmap->features &= ~DASD_FEATURE_USEDIAG; 755 devmap->features &= ~DASD_FEATURE_USEDIAG;
@@ -854,14 +864,20 @@ dasd_eer_store(struct device *dev, struct device_attribute *attr,
854 const char *buf, size_t count) 864 const char *buf, size_t count)
855{ 865{
856 struct dasd_devmap *devmap; 866 struct dasd_devmap *devmap;
857 int rc; 867 int val, rc;
868 char *endp;
858 869
859 devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); 870 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
860 if (IS_ERR(devmap)) 871 if (IS_ERR(devmap))
861 return PTR_ERR(devmap); 872 return PTR_ERR(devmap);
862 if (!devmap->device) 873 if (!devmap->device)
863 return count; 874 return -ENODEV;
864 if (buf[0] == '1') { 875
876 val = simple_strtoul(buf, &endp, 0);
877 if (((endp + 1) < (buf + count)) || (val > 1))
878 return -EINVAL;
879
880 if (val) {
865 rc = dasd_eer_enable(devmap->device); 881 rc = dasd_eer_enable(devmap->device);
866 if (rc) 882 if (rc)
867 return rc; 883 return rc;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index d7de175d53f0..c9321b920e90 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -299,14 +299,14 @@ raw3215_timeout(unsigned long __data)
299 struct raw3215_info *raw = (struct raw3215_info *) __data; 299 struct raw3215_info *raw = (struct raw3215_info *) __data;
300 unsigned long flags; 300 unsigned long flags;
301 301
302 spin_lock_irqsave(raw->lock, flags); 302 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
303 if (raw->flags & RAW3215_TIMER_RUNS) { 303 if (raw->flags & RAW3215_TIMER_RUNS) {
304 del_timer(&raw->timer); 304 del_timer(&raw->timer);
305 raw->flags &= ~RAW3215_TIMER_RUNS; 305 raw->flags &= ~RAW3215_TIMER_RUNS;
306 raw3215_mk_write_req(raw); 306 raw3215_mk_write_req(raw);
307 raw3215_start_io(raw); 307 raw3215_start_io(raw);
308 } 308 }
309 spin_unlock_irqrestore(raw->lock, flags); 309 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
310} 310}
311 311
312/* 312/*
@@ -355,10 +355,10 @@ raw3215_tasklet(void *data)
355 unsigned long flags; 355 unsigned long flags;
356 356
357 raw = (struct raw3215_info *) data; 357 raw = (struct raw3215_info *) data;
358 spin_lock_irqsave(raw->lock, flags); 358 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
359 raw3215_mk_write_req(raw); 359 raw3215_mk_write_req(raw);
360 raw3215_try_io(raw); 360 raw3215_try_io(raw);
361 spin_unlock_irqrestore(raw->lock, flags); 361 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
362 /* Check for pending message from raw3215_irq */ 362 /* Check for pending message from raw3215_irq */
363 if (raw->message != NULL) { 363 if (raw->message != NULL) {
364 printk(raw->message, raw->msg_dstat, raw->msg_cstat); 364 printk(raw->message, raw->msg_dstat, raw->msg_cstat);
@@ -512,9 +512,9 @@ raw3215_make_room(struct raw3215_info *raw, unsigned int length)
512 if (RAW3215_BUFFER_SIZE - raw->count >= length) 512 if (RAW3215_BUFFER_SIZE - raw->count >= length)
513 break; 513 break;
514 /* there might be another cpu waiting for the lock */ 514 /* there might be another cpu waiting for the lock */
515 spin_unlock(raw->lock); 515 spin_unlock(get_ccwdev_lock(raw->cdev));
516 udelay(100); 516 udelay(100);
517 spin_lock(raw->lock); 517 spin_lock(get_ccwdev_lock(raw->cdev));
518 } 518 }
519} 519}
520 520
@@ -528,7 +528,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
528 int c, count; 528 int c, count;
529 529
530 while (length > 0) { 530 while (length > 0) {
531 spin_lock_irqsave(raw->lock, flags); 531 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
532 count = (length > RAW3215_BUFFER_SIZE) ? 532 count = (length > RAW3215_BUFFER_SIZE) ?
533 RAW3215_BUFFER_SIZE : length; 533 RAW3215_BUFFER_SIZE : length;
534 length -= count; 534 length -= count;
@@ -555,7 +555,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length)
555 /* start or queue request */ 555 /* start or queue request */
556 raw3215_try_io(raw); 556 raw3215_try_io(raw);
557 } 557 }
558 spin_unlock_irqrestore(raw->lock, flags); 558 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
559 } 559 }
560} 560}
561 561
@@ -568,7 +568,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
568 unsigned long flags; 568 unsigned long flags;
569 unsigned int length, i; 569 unsigned int length, i;
570 570
571 spin_lock_irqsave(raw->lock, flags); 571 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
572 if (ch == '\t') { 572 if (ch == '\t') {
573 length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE); 573 length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
574 raw->line_pos += length; 574 raw->line_pos += length;
@@ -592,7 +592,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
592 /* start or queue request */ 592 /* start or queue request */
593 raw3215_try_io(raw); 593 raw3215_try_io(raw);
594 } 594 }
595 spin_unlock_irqrestore(raw->lock, flags); 595 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
596} 596}
597 597
598/* 598/*
@@ -604,13 +604,13 @@ raw3215_flush_buffer(struct raw3215_info *raw)
604{ 604{
605 unsigned long flags; 605 unsigned long flags;
606 606
607 spin_lock_irqsave(raw->lock, flags); 607 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
608 if (raw->count > 0) { 608 if (raw->count > 0) {
609 raw->flags |= RAW3215_FLUSHING; 609 raw->flags |= RAW3215_FLUSHING;
610 raw3215_try_io(raw); 610 raw3215_try_io(raw);
611 raw->flags &= ~RAW3215_FLUSHING; 611 raw->flags &= ~RAW3215_FLUSHING;
612 } 612 }
613 spin_unlock_irqrestore(raw->lock, flags); 613 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
614} 614}
615 615
616/* 616/*
@@ -625,9 +625,9 @@ raw3215_startup(struct raw3215_info *raw)
625 return 0; 625 return 0;
626 raw->line_pos = 0; 626 raw->line_pos = 0;
627 raw->flags |= RAW3215_ACTIVE; 627 raw->flags |= RAW3215_ACTIVE;
628 spin_lock_irqsave(raw->lock, flags); 628 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
629 raw3215_try_io(raw); 629 raw3215_try_io(raw);
630 spin_unlock_irqrestore(raw->lock, flags); 630 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
631 631
632 return 0; 632 return 0;
633} 633}
@@ -644,21 +644,21 @@ raw3215_shutdown(struct raw3215_info *raw)
644 if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED)) 644 if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
645 return; 645 return;
646 /* Wait for outstanding requests, then free irq */ 646 /* Wait for outstanding requests, then free irq */
647 spin_lock_irqsave(raw->lock, flags); 647 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
648 if ((raw->flags & RAW3215_WORKING) || 648 if ((raw->flags & RAW3215_WORKING) ||
649 raw->queued_write != NULL || 649 raw->queued_write != NULL ||
650 raw->queued_read != NULL) { 650 raw->queued_read != NULL) {
651 raw->flags |= RAW3215_CLOSING; 651 raw->flags |= RAW3215_CLOSING;
652 add_wait_queue(&raw->empty_wait, &wait); 652 add_wait_queue(&raw->empty_wait, &wait);
653 set_current_state(TASK_INTERRUPTIBLE); 653 set_current_state(TASK_INTERRUPTIBLE);
654 spin_unlock_irqrestore(raw->lock, flags); 654 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
655 schedule(); 655 schedule();
656 spin_lock_irqsave(raw->lock, flags); 656 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
657 remove_wait_queue(&raw->empty_wait, &wait); 657 remove_wait_queue(&raw->empty_wait, &wait);
658 set_current_state(TASK_RUNNING); 658 set_current_state(TASK_RUNNING);
659 raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING); 659 raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
660 } 660 }
661 spin_unlock_irqrestore(raw->lock, flags); 661 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
662} 662}
663 663
664static int 664static int
@@ -686,7 +686,6 @@ raw3215_probe (struct ccw_device *cdev)
686 } 686 }
687 687
688 raw->cdev = cdev; 688 raw->cdev = cdev;
689 raw->lock = get_ccwdev_lock(cdev);
690 raw->inbuf = (char *) raw + sizeof(struct raw3215_info); 689 raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
691 memset(raw, 0, sizeof(struct raw3215_info)); 690 memset(raw, 0, sizeof(struct raw3215_info));
692 raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE, 691 raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE,
@@ -809,9 +808,9 @@ con3215_unblank(void)
809 unsigned long flags; 808 unsigned long flags;
810 809
811 raw = raw3215[0]; /* console 3215 is the first one */ 810 raw = raw3215[0]; /* console 3215 is the first one */
812 spin_lock_irqsave(raw->lock, flags); 811 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
813 raw3215_make_room(raw, RAW3215_BUFFER_SIZE); 812 raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
814 spin_unlock_irqrestore(raw->lock, flags); 813 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
815} 814}
816 815
817static int __init 816static int __init
@@ -873,7 +872,6 @@ con3215_init(void)
873 raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE); 872 raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
874 raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE); 873 raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
875 raw->cdev = cdev; 874 raw->cdev = cdev;
876 raw->lock = get_ccwdev_lock(cdev);
877 cdev->dev.driver_data = raw; 875 cdev->dev.driver_data = raw;
878 cdev->handler = raw3215_irq; 876 cdev->handler = raw3215_irq;
879 877
@@ -1066,10 +1064,10 @@ tty3215_unthrottle(struct tty_struct * tty)
1066 1064
1067 raw = (struct raw3215_info *) tty->driver_data; 1065 raw = (struct raw3215_info *) tty->driver_data;
1068 if (raw->flags & RAW3215_THROTTLED) { 1066 if (raw->flags & RAW3215_THROTTLED) {
1069 spin_lock_irqsave(raw->lock, flags); 1067 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
1070 raw->flags &= ~RAW3215_THROTTLED; 1068 raw->flags &= ~RAW3215_THROTTLED;
1071 raw3215_try_io(raw); 1069 raw3215_try_io(raw);
1072 spin_unlock_irqrestore(raw->lock, flags); 1070 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
1073 } 1071 }
1074} 1072}
1075 1073
@@ -1096,10 +1094,10 @@ tty3215_start(struct tty_struct *tty)
1096 1094
1097 raw = (struct raw3215_info *) tty->driver_data; 1095 raw = (struct raw3215_info *) tty->driver_data;
1098 if (raw->flags & RAW3215_STOPPED) { 1096 if (raw->flags & RAW3215_STOPPED) {
1099 spin_lock_irqsave(raw->lock, flags); 1097 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
1100 raw->flags &= ~RAW3215_STOPPED; 1098 raw->flags &= ~RAW3215_STOPPED;
1101 raw3215_try_io(raw); 1099 raw3215_try_io(raw);
1102 spin_unlock_irqrestore(raw->lock, flags); 1100 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
1103 } 1101 }
1104} 1102}
1105 1103
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 32004aae95c1..ffa9282ce97a 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -19,52 +19,17 @@
19 19
20#include "sclp.h" 20#include "sclp.h"
21 21
22
23#ifdef CONFIG_SMP
24/* Signal completion of shutdown process. All CPUs except the first to enter
25 * this function: go to stopped state. First CPU: wait until all other
26 * CPUs are in stopped or check stop state. Afterwards, load special PSW
27 * to indicate completion. */
28static void
29do_load_quiesce_psw(void * __unused)
30{
31 static atomic_t cpuid = ATOMIC_INIT(-1);
32 psw_t quiesce_psw;
33 int cpu;
34
35 if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
36 signal_processor(smp_processor_id(), sigp_stop);
37 /* Wait for all other cpus to enter stopped state */
38 for_each_online_cpu(cpu) {
39 if (cpu == smp_processor_id())
40 continue;
41 while(!smp_cpu_not_running(cpu))
42 cpu_relax();
43 }
44 /* Quiesce the last cpu with the special psw */
45 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
46 quiesce_psw.addr = 0xfff;
47 __load_psw(quiesce_psw);
48}
49
50/* Shutdown handler. Perform shutdown function on all CPUs. */
51static void
52do_machine_quiesce(void)
53{
54 on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
55}
56#else
57/* Shutdown handler. Signal completion of shutdown by loading special PSW. */ 22/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
58static void 23static void
59do_machine_quiesce(void) 24do_machine_quiesce(void)
60{ 25{
61 psw_t quiesce_psw; 26 psw_t quiesce_psw;
62 27
28 smp_send_stop();
63 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT; 29 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
64 quiesce_psw.addr = 0xfff; 30 quiesce_psw.addr = 0xfff;
65 __load_psw(quiesce_psw); 31 __load_psw(quiesce_psw);
66} 32}
67#endif
68 33
69/* Handler for quiesce event. Start shutdown procedure. */ 34/* Handler for quiesce event. Start shutdown procedure. */
70static void 35static void
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 2d78f0f4a40f..dbfb77b03928 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -251,6 +251,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
251 cc = cio_clear(sch); 251 cc = cio_clear(sch);
252 if (cc == -ENODEV) 252 if (cc == -ENODEV)
253 goto out_unreg; 253 goto out_unreg;
254 /* Request retry of internal operation. */
255 device_set_intretry(sch);
254 /* Call handler. */ 256 /* Call handler. */
255 if (sch->driver && sch->driver->termination) 257 if (sch->driver && sch->driver->termination)
256 sch->driver->termination(&sch->dev); 258 sch->driver->termination(&sch->dev);
@@ -711,9 +713,6 @@ static inline int check_for_io_on_path(struct subchannel *sch, int index)
711{ 713{
712 int cc; 714 int cc;
713 715
714 if (!device_is_online(sch))
715 /* cio could be doing I/O. */
716 return 0;
717 cc = stsch(sch->schid, &sch->schib); 716 cc = stsch(sch->schid, &sch->schib);
718 if (cc) 717 if (cc)
719 return 0; 718 return 0;
@@ -722,6 +721,26 @@ static inline int check_for_io_on_path(struct subchannel *sch, int index)
722 return 0; 721 return 0;
723} 722}
724 723
724static void terminate_internal_io(struct subchannel *sch)
725{
726 if (cio_clear(sch)) {
727 /* Recheck device in case clear failed. */
728 sch->lpm = 0;
729 if (device_trigger_verify(sch) != 0) {
730 if(css_enqueue_subchannel_slow(sch->schid)) {
731 css_clear_subchannel_slow_list();
732 need_rescan = 1;
733 }
734 }
735 return;
736 }
737 /* Request retry of internal operation. */
738 device_set_intretry(sch);
739 /* Call handler. */
740 if (sch->driver && sch->driver->termination)
741 sch->driver->termination(&sch->dev);
742}
743
725static inline void 744static inline void
726__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) 745__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
727{ 746{
@@ -744,20 +763,26 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
744 device_trigger_reprobe(sch); 763 device_trigger_reprobe(sch);
745 else if (sch->driver && sch->driver->verify) 764 else if (sch->driver && sch->driver->verify)
746 sch->driver->verify(&sch->dev); 765 sch->driver->verify(&sch->dev);
747 } else { 766 break;
748 sch->opm &= ~(0x80 >> chp); 767 }
749 sch->lpm &= ~(0x80 >> chp); 768 sch->opm &= ~(0x80 >> chp);
750 if (check_for_io_on_path(sch, chp)) 769 sch->lpm &= ~(0x80 >> chp);
770 if (check_for_io_on_path(sch, chp)) {
771 if (device_is_online(sch))
751 /* Path verification is done after killing. */ 772 /* Path verification is done after killing. */
752 device_kill_io(sch); 773 device_kill_io(sch);
753 else if (!sch->lpm) { 774 else
775 /* Kill and retry internal I/O. */
776 terminate_internal_io(sch);
777 } else if (!sch->lpm) {
778 if (device_trigger_verify(sch) != 0) {
754 if (css_enqueue_subchannel_slow(sch->schid)) { 779 if (css_enqueue_subchannel_slow(sch->schid)) {
755 css_clear_subchannel_slow_list(); 780 css_clear_subchannel_slow_list();
756 need_rescan = 1; 781 need_rescan = 1;
757 } 782 }
758 } else if (sch->driver && sch->driver->verify) 783 }
759 sch->driver->verify(&sch->dev); 784 } else if (sch->driver && sch->driver->verify)
760 } 785 sch->driver->verify(&sch->dev);
761 break; 786 break;
762 } 787 }
763 spin_unlock_irqrestore(&sch->lock, flags); 788 spin_unlock_irqrestore(&sch->lock, flags);
@@ -1465,41 +1490,6 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1465 return desc; 1490 return desc;
1466} 1491}
1467 1492
1468static int reset_channel_path(struct channel_path *chp)
1469{
1470 int cc;
1471
1472 cc = rchp(chp->id);
1473 switch (cc) {
1474 case 0:
1475 return 0;
1476 case 2:
1477 return -EBUSY;
1478 default:
1479 return -ENODEV;
1480 }
1481}
1482
1483static void reset_channel_paths_css(struct channel_subsystem *css)
1484{
1485 int i;
1486
1487 for (i = 0; i <= __MAX_CHPID; i++) {
1488 if (css->chps[i])
1489 reset_channel_path(css->chps[i]);
1490 }
1491}
1492
1493void cio_reset_channel_paths(void)
1494{
1495 int i;
1496
1497 for (i = 0; i <= __MAX_CSSID; i++) {
1498 if (css[i] && css[i]->valid)
1499 reset_channel_paths_css(css[i]);
1500 }
1501}
1502
1503static int __init 1493static int __init
1504chsc_alloc_sei_area(void) 1494chsc_alloc_sei_area(void)
1505{ 1495{
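The interleaved hunk above is hard to follow in this rendering; reconstructed from its "+" column, the vary-off path of __s390_subchannel_vary_chpid() now reads as sketched below (the surrounding function body and the unchanged vary-on branch are omitted):

	sch->opm &= ~(0x80 >> chp);
	sch->lpm &= ~(0x80 >> chp);
	if (check_for_io_on_path(sch, chp)) {
		if (device_is_online(sch))
			/* Path verification is done after killing. */
			device_kill_io(sch);
		else
			/* Kill and retry internal I/O. */
			terminate_internal_io(sch);
	} else if (!sch->lpm) {
		if (device_trigger_verify(sch) != 0) {
			if (css_enqueue_subchannel_slow(sch->schid)) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
		}
	} else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
	break;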
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 8936e460a807..20aee2783847 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -21,6 +21,7 @@
21#include <asm/irq.h> 21#include <asm/irq.h>
22#include <asm/irq_regs.h> 22#include <asm/irq_regs.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/reset.h>
24#include "airq.h" 25#include "airq.h"
25#include "cio.h" 26#include "cio.h"
26#include "css.h" 27#include "css.h"
@@ -28,6 +29,7 @@
28#include "ioasm.h" 29#include "ioasm.h"
29#include "blacklist.h" 30#include "blacklist.h"
30#include "cio_debug.h" 31#include "cio_debug.h"
32#include "../s390mach.h"
31 33
32debug_info_t *cio_debug_msg_id; 34debug_info_t *cio_debug_msg_id;
33debug_info_t *cio_debug_trace_id; 35debug_info_t *cio_debug_trace_id;
@@ -841,26 +843,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
841 return -EBUSY; 843 return -EBUSY;
842} 844}
843 845
844struct sch_match_id { 846static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
845 struct subchannel_id schid;
846 struct ccw_dev_id devid;
847 int rc;
848};
849
850static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
851 void *data)
852{ 847{
853 struct schib schib; 848 struct schib schib;
854 struct sch_match_id *match_id = data;
855 849
856 if (stsch_err(schid, &schib)) 850 if (stsch_err(schid, &schib))
857 return -ENXIO; 851 return -ENXIO;
858 if (match_id && schib.pmcw.dnv &&
859 (schib.pmcw.dev == match_id->devid.devno) &&
860 (schid.ssid == match_id->devid.ssid)) {
861 match_id->schid = schid;
862 match_id->rc = 0;
863 }
864 if (!schib.pmcw.ena) 852 if (!schib.pmcw.ena)
865 return 0; 853 return 0;
866 switch(__disable_subchannel_easy(schid, &schib)) { 854 switch(__disable_subchannel_easy(schid, &schib)) {
@@ -876,27 +864,111 @@ static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
876 return 0; 864 return 0;
877} 865}
878 866
879static int clear_all_subchannels_and_match(struct ccw_dev_id *devid, 867static atomic_t chpid_reset_count;
880 struct subchannel_id *schid) 868
869static void s390_reset_chpids_mcck_handler(void)
870{
871 struct crw crw;
872 struct mci *mci;
873
874 /* Check for pending channel report word. */
875 mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
876 if (!mci->cp)
877 return;
878 /* Process channel report words. */
879 while (stcrw(&crw) == 0) {
880 /* Check for responses to RCHP. */
881 if (crw.slct && crw.rsc == CRW_RSC_CPATH)
882 atomic_dec(&chpid_reset_count);
883 }
884}
885
886#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
887static void css_reset(void)
888{
889 int i, ret;
890 unsigned long long timeout;
891
892 /* Reset subchannels. */
893 for_each_subchannel(__shutdown_subchannel_easy, NULL);
894 /* Reset channel paths. */
895 s390_reset_mcck_handler = s390_reset_chpids_mcck_handler;
896 /* Enable channel report machine checks. */
897 __ctl_set_bit(14, 28);
898 /* Temporarily reenable machine checks. */
899 local_mcck_enable();
900 for (i = 0; i <= __MAX_CHPID; i++) {
901 ret = rchp(i);
902 if ((ret == 0) || (ret == 2))
903 /*
904 * rchp either succeeded, or another rchp is already
905 * in progress. In either case, we'll get a crw.
906 */
907 atomic_inc(&chpid_reset_count);
908 }
909 /* Wait for machine check for all channel paths. */
910 timeout = get_clock() + (RCHP_TIMEOUT << 12);
911 while (atomic_read(&chpid_reset_count) != 0) {
912 if (get_clock() > timeout)
913 break;
914 cpu_relax();
915 }
916 /* Disable machine checks again. */
917 local_mcck_disable();
918 /* Disable channel report machine checks. */
919 __ctl_clear_bit(14, 28);
920 s390_reset_mcck_handler = NULL;
921}
922
923static struct reset_call css_reset_call = {
924 .fn = css_reset,
925};
926
927static int __init init_css_reset_call(void)
928{
929 atomic_set(&chpid_reset_count, 0);
930 register_reset_call(&css_reset_call);
931 return 0;
932}
933
934arch_initcall(init_css_reset_call);
935
936struct sch_match_id {
937 struct subchannel_id schid;
938 struct ccw_dev_id devid;
939 int rc;
940};
941
942static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
943{
944 struct schib schib;
945 struct sch_match_id *match_id = data;
946
947 if (stsch_err(schid, &schib))
948 return -ENXIO;
949 if (schib.pmcw.dnv &&
950 (schib.pmcw.dev == match_id->devid.devno) &&
951 (schid.ssid == match_id->devid.ssid)) {
952 match_id->schid = schid;
953 match_id->rc = 0;
954 return 1;
955 }
956 return 0;
957}
958
959static int reipl_find_schid(struct ccw_dev_id *devid,
960 struct subchannel_id *schid)
881{ 961{
882 struct sch_match_id match_id; 962 struct sch_match_id match_id;
883 963
884 match_id.devid = *devid; 964 match_id.devid = *devid;
885 match_id.rc = -ENODEV; 965 match_id.rc = -ENODEV;
886 local_irq_disable(); 966 for_each_subchannel(__reipl_subchannel_match, &match_id);
887 for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
888 if (match_id.rc == 0) 967 if (match_id.rc == 0)
889 *schid = match_id.schid; 968 *schid = match_id.schid;
890 return match_id.rc; 969 return match_id.rc;
891} 970}
892 971
893
894void clear_all_subchannels(void)
895{
896 local_irq_disable();
897 for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
898}
899
900extern void do_reipl_asm(__u32 schid); 972extern void do_reipl_asm(__u32 schid);
901 973
902/* Make sure all subchannels are quiet before we re-ipl an lpar. */ 974/* Make sure all subchannels are quiet before we re-ipl an lpar. */
@@ -904,9 +976,9 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
904{ 976{
905 struct subchannel_id schid; 977 struct subchannel_id schid;
906 978
907 if (clear_all_subchannels_and_match(devid, &schid)) 979 s390_reset_system();
980 if (reipl_find_schid(devid, &schid) != 0)
908 panic("IPL Device not found\n"); 981 panic("IPL Device not found\n");
909 cio_reset_channel_paths();
910 do_reipl_asm(*((__u32*)&schid)); 982 do_reipl_asm(*((__u32*)&schid));
911} 983}
912 984
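Taken together, the cio.c hunks split the old clear-and-match pass in two: subchannels and channel paths are now reset from css_reset(), registered as a reset_call at arch_initcall time and presumably invoked through s390_reset_system() from the new <asm/reset.h> interface, while locating the re-IPL subchannel becomes a separate, read-only scan. The resulting re-IPL entry point, reconstructed from the "+" lines above:

void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id schid;

	s390_reset_system();	/* runs the registered reset_calls, css_reset() among them */
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL Device not found\n");
	do_reipl_asm(*((__u32 *)&schid));
}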
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 4c2ff8336288..9ff064e71767 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -94,6 +94,7 @@ struct ccw_device_private {
94 unsigned int donotify:1; /* call notify function */ 94 unsigned int donotify:1; /* call notify function */
95 unsigned int recog_done:1; /* dev. recog. complete */ 95 unsigned int recog_done:1; /* dev. recog. complete */
96 unsigned int fake_irb:1; /* deliver faked irb */ 96 unsigned int fake_irb:1; /* deliver faked irb */
97 unsigned int intretry:1; /* retry internal operation */
97 } __attribute__((packed)) flags; 98 } __attribute__((packed)) flags;
98 unsigned long intparm; /* user interruption parameter */ 99 unsigned long intparm; /* user interruption parameter */
99 struct qdio_irq *qdio_data; 100 struct qdio_irq *qdio_data;
@@ -171,6 +172,8 @@ void device_trigger_reprobe(struct subchannel *);
171/* Helper functions for vary on/off. */ 172/* Helper functions for vary on/off. */
172int device_is_online(struct subchannel *); 173int device_is_online(struct subchannel *);
173void device_kill_io(struct subchannel *); 174void device_kill_io(struct subchannel *);
175void device_set_intretry(struct subchannel *sch);
176int device_trigger_verify(struct subchannel *sch);
174 177
175/* Machine check helper function. */ 178/* Machine check helper function. */
176void device_kill_pending_timer(struct subchannel *); 179void device_kill_pending_timer(struct subchannel *);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 39c98f940507..d3d3716ff84b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -687,8 +687,20 @@ io_subchannel_register(void *data)
687 cdev = data; 687 cdev = data;
688 sch = to_subchannel(cdev->dev.parent); 688 sch = to_subchannel(cdev->dev.parent);
689 689
690 /*
691 * io_subchannel_register() will also be called after device
692 * recognition has been done for a boxed device (which will already
693 * be registered). We need to reprobe since we may now have sense id
694 * information.
695 */
690 if (klist_node_attached(&cdev->dev.knode_parent)) { 696 if (klist_node_attached(&cdev->dev.knode_parent)) {
691 bus_rescan_devices(&ccw_bus_type); 697 if (!cdev->drv) {
698 ret = device_reprobe(&cdev->dev);
699 if (ret)
700 /* We can't do much here. */
701 dev_info(&cdev->dev, "device_reprobe() returned"
702 " %d\n", ret);
703 }
692 goto out; 704 goto out;
693 } 705 }
694 /* make it known to the system */ 706 /* make it known to the system */
@@ -948,6 +960,9 @@ io_subchannel_ioterm(struct device *dev)
948 cdev = dev->driver_data; 960 cdev = dev->driver_data;
949 if (!cdev) 961 if (!cdev)
950 return; 962 return;
963 /* Internal I/O will be retried by the interrupt handler. */
964 if (cdev->private->flags.intretry)
965 return;
951 cdev->private->state = DEV_STATE_CLEAR_VERIFY; 966 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
952 if (cdev->handler) 967 if (cdev->handler)
953 cdev->handler(cdev, cdev->private->intparm, 968 cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index de3d0857db9f..09c7672eb3f3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -59,6 +59,27 @@ device_set_disconnected(struct subchannel *sch)
59 cdev->private->state = DEV_STATE_DISCONNECTED; 59 cdev->private->state = DEV_STATE_DISCONNECTED;
60} 60}
61 61
62void device_set_intretry(struct subchannel *sch)
63{
64 struct ccw_device *cdev;
65
66 cdev = sch->dev.driver_data;
67 if (!cdev)
68 return;
69 cdev->private->flags.intretry = 1;
70}
71
72int device_trigger_verify(struct subchannel *sch)
73{
74 struct ccw_device *cdev;
75
76 cdev = sch->dev.driver_data;
77 if (!cdev || !cdev->online)
78 return -EINVAL;
79 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
80 return 0;
81}
82
62/* 83/*
63 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT. 84 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
64 */ 85 */
@@ -893,6 +914,12 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
893 * had killed the original request. 914 * had killed the original request.
894 */ 915 */
895 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 916 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
917 /* Retry Basic Sense if requested. */
918 if (cdev->private->flags.intretry) {
919 cdev->private->flags.intretry = 0;
920 ccw_device_do_sense(cdev, irb);
921 return;
922 }
896 cdev->private->flags.dosense = 0; 923 cdev->private->flags.dosense = 0;
897 memset(&cdev->private->irb, 0, sizeof(struct irb)); 924 memset(&cdev->private->irb, 0, sizeof(struct irb));
898 ccw_device_accumulate_irb(cdev, irb); 925 ccw_device_accumulate_irb(cdev, irb);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index a74785b9e4eb..f17275917fe5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -191,6 +191,8 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
191 if ((sch->opm & cdev->private->imask) != 0 && 191 if ((sch->opm & cdev->private->imask) != 0 &&
192 cdev->private->iretry > 0) { 192 cdev->private->iretry > 0) {
193 cdev->private->iretry--; 193 cdev->private->iretry--;
194 /* Reset internal retry indication. */
195 cdev->private->flags.intretry = 0;
194 ret = cio_start (sch, cdev->private->iccws, 196 ret = cio_start (sch, cdev->private->iccws,
195 cdev->private->imask); 197 cdev->private->imask);
196 /* ret is 0, -EBUSY, -EACCES or -ENODEV */ 198 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -237,8 +239,14 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
237 return 0; /* Success */ 239 return 0; /* Success */
238 } 240 }
239 /* Check the error cases. */ 241 /* Check the error cases. */
240 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) 242 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
243 /* Retry Sense ID if requested. */
244 if (cdev->private->flags.intretry) {
245 cdev->private->flags.intretry = 0;
246 return -EAGAIN;
247 }
241 return -ETIME; 248 return -ETIME;
249 }
242 if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) { 250 if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
243 /* 251 /*
244 * if the device doesn't support the SenseID 252 * if the device doesn't support the SenseID
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 2975ce888c19..cb1879a96818 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -71,6 +71,8 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
71 ccw->cda = (__u32) __pa (&cdev->private->pgid[i]); 71 ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
72 if (cdev->private->iretry > 0) { 72 if (cdev->private->iretry > 0) {
73 cdev->private->iretry--; 73 cdev->private->iretry--;
74 /* Reset internal retry indication. */
75 cdev->private->flags.intretry = 0;
74 ret = cio_start (sch, cdev->private->iccws, 76 ret = cio_start (sch, cdev->private->iccws,
75 cdev->private->imask); 77 cdev->private->imask);
76 /* ret is 0, -EBUSY, -EACCES or -ENODEV */ 78 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -122,8 +124,14 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
122 124
123 sch = to_subchannel(cdev->dev.parent); 125 sch = to_subchannel(cdev->dev.parent);
124 irb = &cdev->private->irb; 126 irb = &cdev->private->irb;
125 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) 127 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
128 /* Retry Sense PGID if requested. */
129 if (cdev->private->flags.intretry) {
130 cdev->private->flags.intretry = 0;
131 return -EAGAIN;
132 }
126 return -ETIME; 133 return -ETIME;
134 }
127 if (irb->esw.esw0.erw.cons && 135 if (irb->esw.esw0.erw.cons &&
128 (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) { 136 (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
129 /* 137 /*
@@ -253,6 +261,8 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
253 ret = -EACCES; 261 ret = -EACCES;
254 if (cdev->private->iretry > 0) { 262 if (cdev->private->iretry > 0) {
255 cdev->private->iretry--; 263 cdev->private->iretry--;
264 /* Reset internal retry indication. */
265 cdev->private->flags.intretry = 0;
256 ret = cio_start (sch, cdev->private->iccws, 266 ret = cio_start (sch, cdev->private->iccws,
257 cdev->private->imask); 267 cdev->private->imask);
258 /* We expect an interrupt in case of success or busy 268 /* We expect an interrupt in case of success or busy
@@ -293,6 +303,8 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
293 ret = -EACCES; 303 ret = -EACCES;
294 if (cdev->private->iretry > 0) { 304 if (cdev->private->iretry > 0) {
295 cdev->private->iretry--; 305 cdev->private->iretry--;
306 /* Reset internal retry indication. */
307 cdev->private->flags.intretry = 0;
296 ret = cio_start (sch, cdev->private->iccws, 308 ret = cio_start (sch, cdev->private->iccws,
297 cdev->private->imask); 309 cdev->private->imask);
298 /* We expect an interrupt in case of success or busy 310 /* We expect an interrupt in case of success or busy
@@ -321,8 +333,14 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
321 333
322 sch = to_subchannel(cdev->dev.parent); 334 sch = to_subchannel(cdev->dev.parent);
323 irb = &cdev->private->irb; 335 irb = &cdev->private->irb;
324 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) 336 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
337 /* Retry Set PGID if requested. */
338 if (cdev->private->flags.intretry) {
339 cdev->private->flags.intretry = 0;
340 return -EAGAIN;
341 }
325 return -ETIME; 342 return -ETIME;
343 }
326 if (irb->esw.esw0.erw.cons) { 344 if (irb->esw.esw0.erw.cons) {
327 if (irb->ecw[0] & SNS0_CMD_REJECT) 345 if (irb->ecw[0] & SNS0_CMD_REJECT)
328 return -EOPNOTSUPP; 346 return -EOPNOTSUPP;
@@ -360,8 +378,14 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
360 378
361 sch = to_subchannel(cdev->dev.parent); 379 sch = to_subchannel(cdev->dev.parent);
362 irb = &cdev->private->irb; 380 irb = &cdev->private->irb;
363 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) 381 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
382 /* Retry NOP if requested. */
383 if (cdev->private->flags.intretry) {
384 cdev->private->flags.intretry = 0;
385 return -EAGAIN;
386 }
364 return -ETIME; 387 return -ETIME;
388 }
365 if (irb->scsw.cc == 3) { 389 if (irb->scsw.cc == 3) {
366 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x," 390 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x,"
367 " lpm %02X, became 'not operational'\n", 391 " lpm %02X, became 'not operational'\n",
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 3f7cbce4cd87..bdcf930f7beb 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -319,6 +319,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
319 sch->sense_ccw.count = SENSE_MAX_COUNT; 319 sch->sense_ccw.count = SENSE_MAX_COUNT;
320 sch->sense_ccw.flags = CCW_FLAG_SLI; 320 sch->sense_ccw.flags = CCW_FLAG_SLI;
321 321
322 /* Reset internal retry indication. */
323 cdev->private->flags.intretry = 0;
324
322 return cio_start (sch, &sch->sense_ccw, 0xff); 325 return cio_start (sch, &sch->sense_ccw, 0xff);
323} 326}
324 327
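The device_fsm/device_id/device_pgid/device_status hunks together implement one small retry protocol around the new intretry flag. A condensed sketch of that protocol, using the identifiers from the hunks above (not literal kernel code, control flow abbreviated):

/* 1. Before any internal I/O (Sense ID, Sense/Set PGID, NOP, Basic Sense): */
cdev->private->flags.intretry = 0;
ret = cio_start(sch, cdev->private->iccws, cdev->private->imask);

/* 2. When common I/O code has to kill that internal I/O (e.g. terminate_internal_io()): */
device_set_intretry(sch);			/* sets flags.intretry = 1 */
if (sch->driver && sch->driver->termination)
	sch->driver->termination(&sch->dev);

/* 3. In the check routines, a halt/clear no longer means -ETIME if a retry was requested: */
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
	if (cdev->private->flags.intretry) {
		cdev->private->flags.intretry = 0;
		return -EAGAIN;			/* caller restarts the internal operation */
	}
	return -ETIME;
}

Basic Sense is the one exception: ccw_device_w4sense() above restarts it directly via ccw_device_do_sense() instead of returning -EAGAIN.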
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 476aa1da5cbc..8d5fa1b4d11f 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -481,7 +481,7 @@ qdio_stop_polling(struct qdio_q *q)
481 unsigned char state = 0; 481 unsigned char state = 0;
482 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; 482 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
483 483
484 if (!atomic_swap(&q->polling,0)) 484 if (!atomic_xchg(&q->polling,0))
485 return 1; 485 return 1;
486 486
487 QDIO_DBF_TEXT4(0,trace,"stoppoll"); 487 QDIO_DBF_TEXT4(0,trace,"stoppoll");
@@ -1964,8 +1964,8 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1964 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); 1964 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1965 1965
1966 QDIO_PRINT_WARN("sense data available on qdio channel.\n"); 1966 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1967 HEXDUMP16(WARN,"irb: ",irb); 1967 QDIO_HEXDUMP16(WARN,"irb: ",irb);
1968 HEXDUMP16(WARN,"sense data: ",irb->ecw); 1968 QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
1969 } 1969 }
1970 1970
1971} 1971}
@@ -3425,7 +3425,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3425 3425
3426 if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&& 3426 if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3427 (callflags&QDIO_FLAG_UNDER_INTERRUPT)) 3427 (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3428 atomic_swap(&q->polling,0); 3428 atomic_xchg(&q->polling,0);
3429 3429
3430 if (used_elements) 3430 if (used_elements)
3431 return; 3431 return;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 49bb9e371c32..42927c1b7451 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -236,7 +236,7 @@ enum qdio_irq_states {
236#define QDIO_PRINT_EMERG(x...) do { } while (0) 236#define QDIO_PRINT_EMERG(x...) do { } while (0)
237#endif 237#endif
238 238
239#define HEXDUMP16(importance,header,ptr) \ 239#define QDIO_HEXDUMP16(importance,header,ptr) \
240QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \ 240QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
241 "%02x %02x %02x %02x %02x %02x %02x %02x " \ 241 "%02x %02x %02x %02x %02x %02x %02x %02x " \
242 "%02x %02x %02x %02x\n",*(((char*)ptr)), \ 242 "%02x %02x %02x %02x\n",*(((char*)ptr)), \
@@ -429,8 +429,6 @@ struct qdio_perf_stats {
429}; 429};
430#endif /* QDIO_PERFORMANCE_STATS */ 430#endif /* QDIO_PERFORMANCE_STATS */
431 431
432#define atomic_swap(a,b) xchg((int*)a.counter,b)
433
434/* unlikely as the later the better */ 432/* unlikely as the later the better */
435#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) 433#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
436#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ 434#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
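The qdio (and later qeth) hunks drop the private atomic_swap() macro, which reached into the atomic_t's counter field by hand, in favour of the generic atomic_xchg() helper. A minimal kernel-style sketch of the replacement (the variable name is illustrative only):

	atomic_t polling = ATOMIC_INIT(1);

	/* old: #define atomic_swap(a,b) xchg((int*)a.counter,b), called as atomic_swap(&polling, 0) */
	/* new: generic helper with the same semantics - store 0, return the previous value */
	int was_polling = atomic_xchg(&polling, 0);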
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 79d89c368919..6a54334ffe09 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -431,7 +431,15 @@ static int ap_uevent (struct device *dev, char **envp, int num_envp,
431 ap_dev->device_type); 431 ap_dev->device_type);
432 if (buffer_size - length <= 0) 432 if (buffer_size - length <= 0)
433 return -ENOMEM; 433 return -ENOMEM;
434 envp[1] = 0; 434 buffer += length;
435 buffer_size -= length;
436 /* Add MODALIAS= */
437 envp[1] = buffer;
438 length = scnprintf(buffer, buffer_size, "MODALIAS=ap:t%02X",
439 ap_dev->device_type);
440 if (buffer_size - length <= 0)
441 return -ENOMEM;
442 envp[2] = NULL;
435 return 0; 443 return 0;
436} 444}
437 445
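The ap_bus hunk adds a MODALIAS= variable to the hotplug environment so that userspace (udev/modprobe) can autoload a driver by alias in addition to the existing device-type variable. Given the format string above, a device of type 0x10 would be announced as MODALIAS=ap:t10; a purely illustrative userspace snippet building the same string:

#include <stdio.h>

int main(void)
{
	char buf[32];
	unsigned int device_type = 0x10;	/* hypothetical AP device type */

	snprintf(buf, sizeof(buf), "MODALIAS=ap:t%02X", device_type);
	printf("%s\n", buf);			/* prints: MODALIAS=ap:t10 */
	return 0;
}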
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 66a8aec6efa6..08d4e47070bd 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -54,6 +54,8 @@
54#error Cannot compile lcs.c without some net devices switched on. 54#error Cannot compile lcs.c without some net devices switched on.
55#endif 55#endif
56 56
57#define PRINTK_HEADER " lcs: "
58
57/** 59/**
58 * initialization string for output 60 * initialization string for output
59 */ 61 */
@@ -120,7 +122,7 @@ lcs_alloc_channel(struct lcs_channel *channel)
120 kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); 122 kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
121 if (channel->iob[cnt].data == NULL) 123 if (channel->iob[cnt].data == NULL)
122 break; 124 break;
123 channel->iob[cnt].state = BUF_STATE_EMPTY; 125 channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
124 } 126 }
125 if (cnt < LCS_NUM_BUFFS) { 127 if (cnt < LCS_NUM_BUFFS) {
126 /* Not all io buffers could be allocated. */ 128 /* Not all io buffers could be allocated. */
@@ -236,7 +238,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
236 ((struct lcs_header *) 238 ((struct lcs_header *)
237 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET; 239 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
238 card->read.iob[cnt].callback = lcs_get_frames_cb; 240 card->read.iob[cnt].callback = lcs_get_frames_cb;
239 card->read.iob[cnt].state = BUF_STATE_READY; 241 card->read.iob[cnt].state = LCS_BUF_STATE_READY;
240 card->read.iob[cnt].count = LCS_IOBUFFERSIZE; 242 card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
241 } 243 }
242 card->read.ccws[0].flags &= ~CCW_FLAG_PCI; 244 card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
@@ -247,7 +249,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
247 card->read.ccws[LCS_NUM_BUFFS].cda = 249 card->read.ccws[LCS_NUM_BUFFS].cda =
248 (__u32) __pa(card->read.ccws); 250 (__u32) __pa(card->read.ccws);
249 /* Set initial state of the read channel. */ 251
250 card->read.state = CH_STATE_INIT; 252 card->read.state = LCS_CH_STATE_INIT;
251 253
252 card->read.io_idx = 0; 254 card->read.io_idx = 0;
253 card->read.buf_idx = 0; 255 card->read.buf_idx = 0;
@@ -294,7 +296,7 @@ lcs_setup_write_ccws(struct lcs_card *card)
294 card->write.ccws[LCS_NUM_BUFFS].cda = 296 card->write.ccws[LCS_NUM_BUFFS].cda =
295 (__u32) __pa(card->write.ccws); 297 (__u32) __pa(card->write.ccws);
296 /* Set initial state of the write channel. */ 298 /* Set initial state of the write channel. */
297 card->read.state = CH_STATE_INIT; 299 card->read.state = LCS_CH_STATE_INIT;
298 300
299 card->write.io_idx = 0; 301 card->write.io_idx = 0;
300 card->write.buf_idx = 0; 302 card->write.buf_idx = 0;
@@ -496,7 +498,7 @@ lcs_start_channel(struct lcs_channel *channel)
496 channel->ccws + channel->io_idx, 0, 0, 498 channel->ccws + channel->io_idx, 0, 0,
497 DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND); 499 DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
498 if (rc == 0) 500 if (rc == 0)
499 channel->state = CH_STATE_RUNNING; 501 channel->state = LCS_CH_STATE_RUNNING;
500 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 502 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
501 if (rc) { 503 if (rc) {
502 LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id); 504 LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
@@ -520,8 +522,8 @@ lcs_clear_channel(struct lcs_channel *channel)
520 LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id); 522 LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
521 return rc; 523 return rc;
522 } 524 }
523 wait_event(channel->wait_q, (channel->state == CH_STATE_CLEARED)); 525 wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
524 channel->state = CH_STATE_STOPPED; 526 channel->state = LCS_CH_STATE_STOPPED;
525 return rc; 527 return rc;
526} 528}
527 529
@@ -535,11 +537,11 @@ lcs_stop_channel(struct lcs_channel *channel)
535 unsigned long flags; 537 unsigned long flags;
536 int rc; 538 int rc;
537 539
538 if (channel->state == CH_STATE_STOPPED) 540 if (channel->state == LCS_CH_STATE_STOPPED)
539 return 0; 541 return 0;
540 LCS_DBF_TEXT(4,trace,"haltsch"); 542 LCS_DBF_TEXT(4,trace,"haltsch");
541 LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id); 543 LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
542 channel->state = CH_STATE_INIT; 544 channel->state = LCS_CH_STATE_INIT;
543 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 545 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
544 rc = ccw_device_halt(channel->ccwdev, (addr_t) channel); 546 rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
545 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 547 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -548,7 +550,7 @@ lcs_stop_channel(struct lcs_channel *channel)
548 return rc; 550 return rc;
549 } 551 }
550 /* Asynchronous halt initiated. Wait for its completion. */ 552
551 wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED)); 553 wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
552 lcs_clear_channel(channel); 554 lcs_clear_channel(channel);
553 return 0; 555 return 0;
554} 556}
@@ -596,8 +598,8 @@ __lcs_get_buffer(struct lcs_channel *channel)
596 LCS_DBF_TEXT(5, trace, "_getbuff"); 598 LCS_DBF_TEXT(5, trace, "_getbuff");
597 index = channel->io_idx; 599 index = channel->io_idx;
598 do { 600 do {
599 if (channel->iob[index].state == BUF_STATE_EMPTY) { 601 if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
600 channel->iob[index].state = BUF_STATE_LOCKED; 602 channel->iob[index].state = LCS_BUF_STATE_LOCKED;
601 return channel->iob + index; 603 return channel->iob + index;
602 } 604 }
603 index = (index + 1) & (LCS_NUM_BUFFS - 1); 605 index = (index + 1) & (LCS_NUM_BUFFS - 1);
@@ -626,7 +628,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
626{ 628{
627 int rc; 629 int rc;
628 630
629 if (channel->state != CH_STATE_SUSPENDED) 631 if (channel->state != LCS_CH_STATE_SUSPENDED)
630 return 0; 632 return 0;
631 if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND) 633 if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
632 return 0; 634 return 0;
@@ -636,7 +638,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
636 LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id); 638 LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id);
637 PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc); 639 PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
638 } else 640 } else
639 channel->state = CH_STATE_RUNNING; 641 channel->state = LCS_CH_STATE_RUNNING;
640 return rc; 642 return rc;
641 643
642} 644}
@@ -670,10 +672,10 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
670 int index, rc; 672 int index, rc;
671 673
672 LCS_DBF_TEXT(5, trace, "rdybuff"); 674 LCS_DBF_TEXT(5, trace, "rdybuff");
673 BUG_ON(buffer->state != BUF_STATE_LOCKED && 675 BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
674 buffer->state != BUF_STATE_PROCESSED); 676 buffer->state != LCS_BUF_STATE_PROCESSED);
675 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 677 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
676 buffer->state = BUF_STATE_READY; 678 buffer->state = LCS_BUF_STATE_READY;
677 index = buffer - channel->iob; 679 index = buffer - channel->iob;
678 /* Set length. */ 680 /* Set length. */
679 channel->ccws[index].count = buffer->count; 681 channel->ccws[index].count = buffer->count;
@@ -695,8 +697,8 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
695 int index, prev, next; 697 int index, prev, next;
696 698
697 LCS_DBF_TEXT(5, trace, "prcsbuff"); 699 LCS_DBF_TEXT(5, trace, "prcsbuff");
698 BUG_ON(buffer->state != BUF_STATE_READY); 700 BUG_ON(buffer->state != LCS_BUF_STATE_READY);
699 buffer->state = BUF_STATE_PROCESSED; 701 buffer->state = LCS_BUF_STATE_PROCESSED;
700 index = buffer - channel->iob; 702 index = buffer - channel->iob;
701 prev = (index - 1) & (LCS_NUM_BUFFS - 1); 703 prev = (index - 1) & (LCS_NUM_BUFFS - 1);
702 next = (index + 1) & (LCS_NUM_BUFFS - 1); 704 next = (index + 1) & (LCS_NUM_BUFFS - 1);
@@ -704,7 +706,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
704 channel->ccws[index].flags |= CCW_FLAG_SUSPEND; 706 channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
705 channel->ccws[index].flags &= ~CCW_FLAG_PCI; 707 channel->ccws[index].flags &= ~CCW_FLAG_PCI;
706 /* Check the suspend bit of the previous buffer. */ 708 /* Check the suspend bit of the previous buffer. */
707 if (channel->iob[prev].state == BUF_STATE_READY) { 709 if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
708 /* 710 /*
709 * Previous buffer is in state ready. It might have 711 * Previous buffer is in state ready. It might have
710 * happened in lcs_ready_buffer that the suspend bit 712 * happened in lcs_ready_buffer that the suspend bit
@@ -727,10 +729,10 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
727 unsigned long flags; 729 unsigned long flags;
728 730
729 LCS_DBF_TEXT(5, trace, "relbuff"); 731 LCS_DBF_TEXT(5, trace, "relbuff");
730 BUG_ON(buffer->state != BUF_STATE_LOCKED && 732 BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
731 buffer->state != BUF_STATE_PROCESSED); 733 buffer->state != LCS_BUF_STATE_PROCESSED);
732 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 734 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
733 buffer->state = BUF_STATE_EMPTY; 735 buffer->state = LCS_BUF_STATE_EMPTY;
734 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 736 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
735} 737}
736 738
@@ -1264,7 +1266,7 @@ lcs_register_mc_addresses(void *data)
1264 netif_carrier_off(card->dev); 1266 netif_carrier_off(card->dev);
1265 netif_tx_disable(card->dev); 1267 netif_tx_disable(card->dev);
1266 wait_event(card->write.wait_q, 1268 wait_event(card->write.wait_q,
1267 (card->write.state != CH_STATE_RUNNING)); 1269 (card->write.state != LCS_CH_STATE_RUNNING));
1268 lcs_fix_multicast_list(card); 1270 lcs_fix_multicast_list(card);
1269 if (card->state == DEV_STATE_UP) { 1271 if (card->state == DEV_STATE_UP) {
1270 netif_carrier_on(card->dev); 1272 netif_carrier_on(card->dev);
@@ -1404,7 +1406,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1404 } 1406 }
1405 } 1407 }
1406 /* How far in the ccw chain have we processed? */ 1408 /* How far in the ccw chain have we processed? */
1407 if ((channel->state != CH_STATE_INIT) && 1409 if ((channel->state != LCS_CH_STATE_INIT) &&
1408 (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { 1410 (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
1409 index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) 1411 index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
1410 - channel->ccws; 1412 - channel->ccws;
@@ -1424,20 +1426,20 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1424 (irb->scsw.dstat & DEV_STAT_CHN_END) || 1426 (irb->scsw.dstat & DEV_STAT_CHN_END) ||
1425 (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) 1427 (irb->scsw.dstat & DEV_STAT_UNIT_CHECK))
1426 /* Mark channel as stopped. */ 1428 /* Mark channel as stopped. */
1427 channel->state = CH_STATE_STOPPED; 1429 channel->state = LCS_CH_STATE_STOPPED;
1428 else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) 1430 else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED)
1429 /* CCW execution stopped on a suspend bit. */ 1431 /* CCW execution stopped on a suspend bit. */
1430 channel->state = CH_STATE_SUSPENDED; 1432 channel->state = LCS_CH_STATE_SUSPENDED;
1431 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { 1433 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
1432 if (irb->scsw.cc != 0) { 1434 if (irb->scsw.cc != 0) {
1433 ccw_device_halt(channel->ccwdev, (addr_t) channel); 1435 ccw_device_halt(channel->ccwdev, (addr_t) channel);
1434 return; 1436 return;
1435 } 1437 }
1436 /* The channel has been stopped by halt_IO. */ 1438 /* The channel has been stopped by halt_IO. */
1437 channel->state = CH_STATE_HALTED; 1439 channel->state = LCS_CH_STATE_HALTED;
1438 } 1440 }
1439 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1441 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
1440 channel->state = CH_STATE_CLEARED; 1442 channel->state = LCS_CH_STATE_CLEARED;
1441 } 1443 }
1442 /* Do the rest in the tasklet. */ 1444 /* Do the rest in the tasklet. */
1443 tasklet_schedule(&channel->irq_tasklet); 1445 tasklet_schedule(&channel->irq_tasklet);
@@ -1461,7 +1463,7 @@ lcs_tasklet(unsigned long data)
1461 /* Check for processed buffers. */ 1463 /* Check for processed buffers. */
1462 iob = channel->iob; 1464 iob = channel->iob;
1463 buf_idx = channel->buf_idx; 1465 buf_idx = channel->buf_idx;
1464 while (iob[buf_idx].state == BUF_STATE_PROCESSED) { 1466 while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
1465 /* Do the callback thing. */ 1467 /* Do the callback thing. */
1466 if (iob[buf_idx].callback != NULL) 1468 if (iob[buf_idx].callback != NULL)
1467 iob[buf_idx].callback(channel, iob + buf_idx); 1469 iob[buf_idx].callback(channel, iob + buf_idx);
@@ -1469,12 +1471,12 @@ lcs_tasklet(unsigned long data)
1469 } 1471 }
1470 channel->buf_idx = buf_idx; 1472 channel->buf_idx = buf_idx;
1471 1473
1472 if (channel->state == CH_STATE_STOPPED) 1474 if (channel->state == LCS_CH_STATE_STOPPED)
1473 // FIXME: what if rc != 0 ?? 1475 // FIXME: what if rc != 0 ??
1474 rc = lcs_start_channel(channel); 1476 rc = lcs_start_channel(channel);
1475 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1477 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1476 if (channel->state == CH_STATE_SUSPENDED && 1478 if (channel->state == LCS_CH_STATE_SUSPENDED &&
1477 channel->iob[channel->io_idx].state == BUF_STATE_READY) { 1479 channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) {
1478 // FIXME: what if rc != 0 ?? 1480 // FIXME: what if rc != 0 ??
1479 rc = __lcs_resume_channel(channel); 1481 rc = __lcs_resume_channel(channel);
1480 } 1482 }
@@ -1689,8 +1691,8 @@ lcs_detect(struct lcs_card *card)
1689 card->state = DEV_STATE_UP; 1691 card->state = DEV_STATE_UP;
1690 } else { 1692 } else {
1691 card->state = DEV_STATE_DOWN; 1693 card->state = DEV_STATE_DOWN;
1692 card->write.state = CH_STATE_INIT; 1694 card->write.state = LCS_CH_STATE_INIT;
1693 card->read.state = CH_STATE_INIT; 1695 card->read.state = LCS_CH_STATE_INIT;
1694 } 1696 }
1695 return rc; 1697 return rc;
1696} 1698}
@@ -1705,8 +1707,8 @@ lcs_stopcard(struct lcs_card *card)
1705 1707
1706 LCS_DBF_TEXT(3, setup, "stopcard"); 1708 LCS_DBF_TEXT(3, setup, "stopcard");
1707 1709
1708 if (card->read.state != CH_STATE_STOPPED && 1710 if (card->read.state != LCS_CH_STATE_STOPPED &&
1709 card->write.state != CH_STATE_STOPPED && 1711 card->write.state != LCS_CH_STATE_STOPPED &&
1710 card->state == DEV_STATE_UP) { 1712 card->state == DEV_STATE_UP) {
1711 lcs_clear_multicast_list(card); 1713 lcs_clear_multicast_list(card);
1712 rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP); 1714 rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
@@ -1871,7 +1873,7 @@ lcs_stop_device(struct net_device *dev)
1871 netif_tx_disable(dev); 1873 netif_tx_disable(dev);
1872 dev->flags &= ~IFF_UP; 1874 dev->flags &= ~IFF_UP;
1873 wait_event(card->write.wait_q, 1875 wait_event(card->write.wait_q,
1874 (card->write.state != CH_STATE_RUNNING)); 1876 (card->write.state != LCS_CH_STATE_RUNNING));
1875 rc = lcs_stopcard(card); 1877 rc = lcs_stopcard(card);
1876 if (rc) 1878 if (rc)
1877 PRINT_ERR("Try it again!\n "); 1879 PRINT_ERR("Try it again!\n ");
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index b5247dc08b57..0e1e4a0a88f0 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -23,11 +23,6 @@ do { \
23} while (0) 23} while (0)
24 24
25/** 25/**
26 * some more definitions for debug or output stuff
27 */
28#define PRINTK_HEADER " lcs: "
29
30/**
31 * sysfs related stuff 26 * sysfs related stuff
32 */ 27 */
33#define CARD_FROM_DEV(cdev) \ 28#define CARD_FROM_DEV(cdev) \
@@ -127,22 +122,22 @@ do { \
127 * LCS Buffer states 122 * LCS Buffer states
128 */ 123 */
129enum lcs_buffer_states { 124enum lcs_buffer_states {
130 BUF_STATE_EMPTY, /* buffer is empty */ 125 LCS_BUF_STATE_EMPTY, /* buffer is empty */
131 BUF_STATE_LOCKED, /* buffer is locked, don't touch */ 126 LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */
132 BUF_STATE_READY, /* buffer is ready for read/write */ 127 LCS_BUF_STATE_READY, /* buffer is ready for read/write */
133 BUF_STATE_PROCESSED, 128 LCS_BUF_STATE_PROCESSED,
134}; 129};
135 130
136/** 131/**
137 * LCS Channel State Machine declarations 132 * LCS Channel State Machine declarations
138 */ 133 */
139enum lcs_channel_states { 134enum lcs_channel_states {
140 CH_STATE_INIT, 135 LCS_CH_STATE_INIT,
141 CH_STATE_HALTED, 136 LCS_CH_STATE_HALTED,
142 CH_STATE_STOPPED, 137 LCS_CH_STATE_STOPPED,
143 CH_STATE_RUNNING, 138 LCS_CH_STATE_RUNNING,
144 CH_STATE_SUSPENDED, 139 LCS_CH_STATE_SUSPENDED,
145 CH_STATE_CLEARED, 140 LCS_CH_STATE_CLEARED,
146}; 141};
147 142
148/** 143/**
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 821383d8cbe7..53c358c7d368 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -151,8 +151,6 @@ qeth_hex_dump(unsigned char *buf, size_t len)
151#define SENSE_RESETTING_EVENT_BYTE 1 151#define SENSE_RESETTING_EVENT_BYTE 1
152#define SENSE_RESETTING_EVENT_FLAG 0x80 152#define SENSE_RESETTING_EVENT_FLAG 0x80
153 153
154#define atomic_swap(a,b) xchg((int *)a.counter, b)
155
156/* 154/*
157 * Common IO related definitions 155 * Common IO related definitions
158 */ 156 */
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 8364d5475ac7..7fdc5272c446 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -2982,7 +2982,7 @@ qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2982 */ 2982 */
2983 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || 2983 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
2984 !atomic_read(&queue->set_pci_flags_count)){ 2984 !atomic_read(&queue->set_pci_flags_count)){
2985 if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == 2985 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
2986 QETH_OUT_Q_UNLOCKED) { 2986 QETH_OUT_Q_UNLOCKED) {
2987 /* 2987 /*
2988 * If we get in here, there was no action in 2988 * If we get in here, there was no action in
@@ -3245,7 +3245,7 @@ qeth_free_qdio_buffers(struct qeth_card *card)
3245 int i, j; 3245 int i, j;
3246 3246
3247 QETH_DBF_TEXT(trace, 2, "freeqdbf"); 3247 QETH_DBF_TEXT(trace, 2, "freeqdbf");
3248 if (atomic_swap(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 3248 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
3249 QETH_QDIO_UNINITIALIZED) 3249 QETH_QDIO_UNINITIALIZED)
3250 return; 3250 return;
3251 kfree(card->qdio.in_q); 3251 kfree(card->qdio.in_q);
@@ -4366,7 +4366,7 @@ out:
4366 if (flush_count) 4366 if (flush_count)
4367 qeth_flush_buffers(queue, 0, start_index, flush_count); 4367 qeth_flush_buffers(queue, 0, start_index, flush_count);
4368 else if (!atomic_read(&queue->set_pci_flags_count)) 4368 else if (!atomic_read(&queue->set_pci_flags_count))
4369 atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); 4369 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4370 /* 4370 /*
4371 * queue->state will go from LOCKED -> UNLOCKED or from 4371 * queue->state will go from LOCKED -> UNLOCKED or from
4372 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us 4372 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index b5b0c2cba96b..5c0b75bbfa10 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/jiffies.h>
28#include <linux/err.h> 29#include <linux/err.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/string.h> 31#include <linux/string.h>
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
index b691d3e14754..787a8f134677 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
@@ -282,7 +282,7 @@ void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
282} 282}
283 283
284/* Setup any dynamic params in the uart desc */ 284/* Setup any dynamic params in the uart desc */
285int cpm_uart_init_portdesc(void) 285int __init cpm_uart_init_portdesc(void)
286{ 286{
287#if defined(CONFIG_SERIAL_CPM_SMC1) || defined(CONFIG_SERIAL_CPM_SMC2) 287#if defined(CONFIG_SERIAL_CPM_SMC1) || defined(CONFIG_SERIAL_CPM_SMC2)
288 u32 addr; 288 u32 addr;
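The cpm_uart change only tags cpm_uart_init_portdesc() with __init, placing the function in the init text section so its memory is freed once boot is complete; callers therefore have to be init-time code themselves. Schematically (example_init is hypothetical):

/* __init code lives in .init.text and is discarded after boot. */
static int __init example_init(void)
{
	return cpm_uart_init_portdesc();	/* fine: the caller is also __init */
}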
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 4f80c5b4a753..6dd579ed9777 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/serial/mpc52xx_uart.c
3 *
4 * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs. 2 * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
5 * 3 *
6 * FIXME According to the usermanual the status bits in the status register 4 * FIXME According to the usermanual the status bits in the status register
@@ -14,18 +12,20 @@
14 * 12 *
15 * 13 *
16 * Maintainer : Sylvain Munaut <tnt@246tNt.com> 14 * Maintainer : Sylvain Munaut <tnt@246tNt.com>
17 * 15 *
18 * Some of the code has been inspired/copied from the 2.4 code written 16 * Some of the code has been inspired/copied from the 2.4 code written
19 * by Dale Farnsworth <dfarnsworth@mvista.com>. 17 * by Dale Farnsworth <dfarnsworth@mvista.com>.
20 * 18 *
21 * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com> 19 * Copyright (C) 2006 Secret Lab Technologies Ltd.
20 * Grant Likely <grant.likely@secretlab.ca>
21 * Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com>
22 * Copyright (C) 2003 MontaVista, Software, Inc. 22 * Copyright (C) 2003 MontaVista, Software, Inc.
23 * 23 *
24 * This file is licensed under the terms of the GNU General Public License 24 * This file is licensed under the terms of the GNU General Public License
25 * version 2. This program is licensed "as is" without any warranty of any 25 * version 2. This program is licensed "as is" without any warranty of any
26 * kind, whether express or implied. 26 * kind, whether express or implied.
27 */ 27 */
28 28
29/* Platform device Usage : 29/* Platform device Usage :
30 * 30 *
31 * Since PSCs can have multiple function, the correct driver for each one 31 * Since PSCs can have multiple function, the correct driver for each one
@@ -44,7 +44,24 @@
44 * will be mapped to. 44 * will be mapped to.
45 */ 45 */
46 46
47#include <linux/platform_device.h> 47/* OF Platform device Usage :
48 *
49 * This driver is only used for PSCs configured in uart mode. The device
50 * tree will have a node for each PSC in uart mode w/ device_type = "serial"
51 * and "mpc52xx-psc-uart" in the compatible string
52 *
53 * By default, PSC devices are enumerated in the order they are found. However
 54 * a particular PSC number can be forced by adding 'device_no = <port#>'
55 * to the device node.
56 *
 57 * The driver initializes all necessary registers to place the PSC in uart mode
 58 * without DCD. However, the pin multiplexing isn't changed and should be set either
59 * by the bootloader or in the platform init code.
60 */
61
62#undef DEBUG
63
64#include <linux/device.h>
48#include <linux/module.h> 65#include <linux/module.h>
49#include <linux/tty.h> 66#include <linux/tty.h>
50#include <linux/serial.h> 67#include <linux/serial.h>
@@ -54,6 +71,12 @@
54#include <asm/delay.h> 71#include <asm/delay.h>
55#include <asm/io.h> 72#include <asm/io.h>
56 73
74#if defined(CONFIG_PPC_MERGE)
75#include <asm/of_platform.h>
76#else
77#include <linux/platform_device.h>
78#endif
79
57#include <asm/mpc52xx.h> 80#include <asm/mpc52xx.h>
58#include <asm/mpc52xx_psc.h> 81#include <asm/mpc52xx_psc.h>
59 82
@@ -80,6 +103,12 @@ static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM];
80 * it's cleared, then a memset(...,0,...) should be added to 103 * it's cleared, then a memset(...,0,...) should be added to
81 * the console_init 104 * the console_init
82 */ 105 */
106#if defined(CONFIG_PPC_MERGE)
107/* lookup table for matching device nodes to index numbers */
108static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
109
110static void mpc52xx_uart_of_enumerate(void);
111#endif
83 112
84#define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase)) 113#define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase))
85 114
@@ -96,32 +125,40 @@ static irqreturn_t mpc52xx_uart_int(int irq,void *dev_id);
96#define uart_console(port) (0) 125#define uart_console(port) (0)
97#endif 126#endif
98 127
128#if defined(CONFIG_PPC_MERGE)
129static struct of_device_id mpc52xx_uart_of_match[] = {
130 { .type = "serial", .compatible = "mpc52xx-psc-uart", },
131 { .type = "serial", .compatible = "mpc5200-psc", }, /* Efika only! */
132 {},
133};
134#endif
135
99 136
100/* ======================================================================== */ 137/* ======================================================================== */
101/* UART operations */ 138/* UART operations */
102/* ======================================================================== */ 139/* ======================================================================== */
103 140
104static unsigned int 141static unsigned int
105mpc52xx_uart_tx_empty(struct uart_port *port) 142mpc52xx_uart_tx_empty(struct uart_port *port)
106{ 143{
107 int status = in_be16(&PSC(port)->mpc52xx_psc_status); 144 int status = in_be16(&PSC(port)->mpc52xx_psc_status);
108 return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0; 145 return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
109} 146}
110 147
111static void 148static void
112mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) 149mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
113{ 150{
114 /* Not implemented */ 151 /* Not implemented */
115} 152}
116 153
117static unsigned int 154static unsigned int
118mpc52xx_uart_get_mctrl(struct uart_port *port) 155mpc52xx_uart_get_mctrl(struct uart_port *port)
119{ 156{
120 /* Not implemented */ 157 /* Not implemented */
121 return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; 158 return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
122} 159}
123 160
124static void 161static void
125mpc52xx_uart_stop_tx(struct uart_port *port) 162mpc52xx_uart_stop_tx(struct uart_port *port)
126{ 163{
127 /* port->lock taken by caller */ 164 /* port->lock taken by caller */
@@ -129,7 +166,7 @@ mpc52xx_uart_stop_tx(struct uart_port *port)
129 out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask); 166 out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
130} 167}
131 168
132static void 169static void
133mpc52xx_uart_start_tx(struct uart_port *port) 170mpc52xx_uart_start_tx(struct uart_port *port)
134{ 171{
135 /* port->lock taken by caller */ 172 /* port->lock taken by caller */
@@ -137,12 +174,12 @@ mpc52xx_uart_start_tx(struct uart_port *port)
137 out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask); 174 out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
138} 175}
139 176
140static void 177static void
141mpc52xx_uart_send_xchar(struct uart_port *port, char ch) 178mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
142{ 179{
143 unsigned long flags; 180 unsigned long flags;
144 spin_lock_irqsave(&port->lock, flags); 181 spin_lock_irqsave(&port->lock, flags);
145 182
146 port->x_char = ch; 183 port->x_char = ch;
147 if (ch) { 184 if (ch) {
148 /* Make sure tx interrupts are on */ 185 /* Make sure tx interrupts are on */
@@ -150,7 +187,7 @@ mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
150 port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY; 187 port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
151 out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask); 188 out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
152 } 189 }
153 190
154 spin_unlock_irqrestore(&port->lock, flags); 191 spin_unlock_irqrestore(&port->lock, flags);
155} 192}
156 193
@@ -178,7 +215,7 @@ mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
178 out_8(&PSC(port)->command,MPC52xx_PSC_START_BRK); 215 out_8(&PSC(port)->command,MPC52xx_PSC_START_BRK);
179 else 216 else
180 out_8(&PSC(port)->command,MPC52xx_PSC_STOP_BRK); 217 out_8(&PSC(port)->command,MPC52xx_PSC_STOP_BRK);
181 218
182 spin_unlock_irqrestore(&port->lock, flags); 219 spin_unlock_irqrestore(&port->lock, flags);
183} 220}
184 221
@@ -197,11 +234,11 @@ mpc52xx_uart_startup(struct uart_port *port)
197 /* Reset/activate the port, clear and enable interrupts */ 234 /* Reset/activate the port, clear and enable interrupts */
198 out_8(&psc->command,MPC52xx_PSC_RST_RX); 235 out_8(&psc->command,MPC52xx_PSC_RST_RX);
199 out_8(&psc->command,MPC52xx_PSC_RST_TX); 236 out_8(&psc->command,MPC52xx_PSC_RST_TX);
200 237
201 out_be32(&psc->sicr,0); /* UART mode DCD ignored */ 238 out_be32(&psc->sicr,0); /* UART mode DCD ignored */
202 239
203 out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */ 240 out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */
204 241
205 out_8(&psc->rfcntl, 0x00); 242 out_8(&psc->rfcntl, 0x00);
206 out_be16(&psc->rfalarm, 0x1ff); 243 out_be16(&psc->rfalarm, 0x1ff);
207 out_8(&psc->tfcntl, 0x07); 244 out_8(&psc->tfcntl, 0x07);
@@ -209,10 +246,10 @@ mpc52xx_uart_startup(struct uart_port *port)
209 246
210 port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY; 247 port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
211 out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask); 248 out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
212 249
213 out_8(&psc->command,MPC52xx_PSC_TX_ENABLE); 250 out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
214 out_8(&psc->command,MPC52xx_PSC_RX_ENABLE); 251 out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
215 252
216 return 0; 253 return 0;
217} 254}
218 255
@@ -220,19 +257,19 @@ static void
220mpc52xx_uart_shutdown(struct uart_port *port) 257mpc52xx_uart_shutdown(struct uart_port *port)
221{ 258{
222 struct mpc52xx_psc __iomem *psc = PSC(port); 259 struct mpc52xx_psc __iomem *psc = PSC(port);
223 260
224 /* Shut down the port, interrupt and all */ 261 /* Shut down the port, interrupt and all */
225 out_8(&psc->command,MPC52xx_PSC_RST_RX); 262 out_8(&psc->command,MPC52xx_PSC_RST_RX);
226 out_8(&psc->command,MPC52xx_PSC_RST_TX); 263 out_8(&psc->command,MPC52xx_PSC_RST_TX);
227 264
228 port->read_status_mask = 0; 265 port->read_status_mask = 0;
229 out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask); 266 out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
230 267
231 /* Release interrupt */ 268 /* Release interrupt */
232 free_irq(port->irq, port); 269 free_irq(port->irq, port);
233} 270}
234 271
235static void 272static void
236mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new, 273mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
237 struct termios *old) 274 struct termios *old)
238{ 275{
@@ -241,10 +278,10 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
241 unsigned char mr1, mr2; 278 unsigned char mr1, mr2;
242 unsigned short ctr; 279 unsigned short ctr;
243 unsigned int j, baud, quot; 280 unsigned int j, baud, quot;
244 281
245 /* Prepare what we're gonna write */ 282 /* Prepare what we're gonna write */
246 mr1 = 0; 283 mr1 = 0;
247 284
248 switch (new->c_cflag & CSIZE) { 285 switch (new->c_cflag & CSIZE) {
249 case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS; 286 case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS;
250 break; 287 break;
@@ -261,8 +298,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
261 MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN; 298 MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN;
262 } else 299 } else
263 mr1 |= MPC52xx_PSC_MODE_PARNONE; 300 mr1 |= MPC52xx_PSC_MODE_PARNONE;
264 301
265 302
266 mr2 = 0; 303 mr2 = 0;
267 304
268 if (new->c_cflag & CSTOPB) 305 if (new->c_cflag & CSTOPB)
@@ -276,7 +313,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
276 baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16); 313 baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
277 quot = uart_get_divisor(port, baud); 314 quot = uart_get_divisor(port, baud);
278 ctr = quot & 0xffff; 315 ctr = quot & 0xffff;
279 316
280 /* Get the lock */ 317 /* Get the lock */
281 spin_lock_irqsave(&port->lock, flags); 318 spin_lock_irqsave(&port->lock, flags);
282 319
@@ -290,14 +327,14 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
290 * boot for the console, all stuff is not yet ready to receive at that 327 * boot for the console, all stuff is not yet ready to receive at that
291 * time and that just makes the kernel oops */ 328 * time and that just makes the kernel oops */
292 /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */ 329 /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
293 while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) && 330 while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
294 --j) 331 --j)
295 udelay(1); 332 udelay(1);
296 333
297 if (!j) 334 if (!j)
298 printk( KERN_ERR "mpc52xx_uart.c: " 335 printk( KERN_ERR "mpc52xx_uart.c: "
299 "Unable to flush RX & TX fifos in-time in set_termios." 336 "Unable to flush RX & TX fifos in-time in set_termios."
300 "Some chars may have been lost.\n" ); 337 "Some chars may have been lost.\n" );
301 338
302 /* Reset the TX & RX */ 339 /* Reset the TX & RX */
303 out_8(&psc->command,MPC52xx_PSC_RST_RX); 340 out_8(&psc->command,MPC52xx_PSC_RST_RX);
@@ -309,7 +346,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
309 out_8(&psc->mode,mr2); 346 out_8(&psc->mode,mr2);
310 out_8(&psc->ctur,ctr >> 8); 347 out_8(&psc->ctur,ctr >> 8);
311 out_8(&psc->ctlr,ctr & 0xff); 348 out_8(&psc->ctlr,ctr & 0xff);
312 349
313 /* Reenable TX & RX */ 350 /* Reenable TX & RX */
314 out_8(&psc->command,MPC52xx_PSC_TX_ENABLE); 351 out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
315 out_8(&psc->command,MPC52xx_PSC_RX_ENABLE); 352 out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
@@ -332,7 +369,7 @@ mpc52xx_uart_release_port(struct uart_port *port)
332 port->membase = NULL; 369 port->membase = NULL;
333 } 370 }
334 371
335 release_mem_region(port->mapbase, MPC52xx_PSC_SIZE); 372 release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
336} 373}
337 374
338static int 375static int
@@ -341,12 +378,13 @@ mpc52xx_uart_request_port(struct uart_port *port)
341 int err; 378 int err;
342 379
343 if (port->flags & UPF_IOREMAP) /* Need to remap ? */ 380 if (port->flags & UPF_IOREMAP) /* Need to remap ? */
344 port->membase = ioremap(port->mapbase, MPC52xx_PSC_SIZE); 381 port->membase = ioremap(port->mapbase,
382 sizeof(struct mpc52xx_psc));
345 383
346 if (!port->membase) 384 if (!port->membase)
347 return -EINVAL; 385 return -EINVAL;
348 386
349 err = request_mem_region(port->mapbase, MPC52xx_PSC_SIZE, 387 err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc),
350 "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY; 388 "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY;
351 389
352 if (err && (port->flags & UPF_IOREMAP)) { 390 if (err && (port->flags & UPF_IOREMAP)) {
@@ -373,7 +411,7 @@ mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
373 411
374 if ( (ser->irq != port->irq) || 412 if ( (ser->irq != port->irq) ||
375 (ser->io_type != SERIAL_IO_MEM) || 413 (ser->io_type != SERIAL_IO_MEM) ||
376 (ser->baud_base != port->uartclk) || 414 (ser->baud_base != port->uartclk) ||
377 (ser->iomem_base != (void*)port->mapbase) || 415 (ser->iomem_base != (void*)port->mapbase) ||
378 (ser->hub6 != 0 ) ) 416 (ser->hub6 != 0 ) )
379 return -EINVAL; 417 return -EINVAL;
@@ -404,11 +442,11 @@ static struct uart_ops mpc52xx_uart_ops = {
404 .verify_port = mpc52xx_uart_verify_port 442 .verify_port = mpc52xx_uart_verify_port
405}; 443};
406 444
407 445
408/* ======================================================================== */ 446/* ======================================================================== */
409/* Interrupt handling */ 447/* Interrupt handling */
410/* ======================================================================== */ 448/* ======================================================================== */
411 449
412static inline int 450static inline int
413mpc52xx_uart_int_rx_chars(struct uart_port *port) 451mpc52xx_uart_int_rx_chars(struct uart_port *port)
414{ 452{
@@ -435,11 +473,11 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
435 473
436 flag = TTY_NORMAL; 474 flag = TTY_NORMAL;
437 port->icount.rx++; 475 port->icount.rx++;
438 476
439 if ( status & (MPC52xx_PSC_SR_PE | 477 if ( status & (MPC52xx_PSC_SR_PE |
440 MPC52xx_PSC_SR_FE | 478 MPC52xx_PSC_SR_FE |
441 MPC52xx_PSC_SR_RB) ) { 479 MPC52xx_PSC_SR_RB) ) {
442 480
443 if (status & MPC52xx_PSC_SR_RB) { 481 if (status & MPC52xx_PSC_SR_RB) {
444 flag = TTY_BREAK; 482 flag = TTY_BREAK;
445 uart_handle_break(port); 483 uart_handle_break(port);
@@ -464,7 +502,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
464 } 502 }
465 503
466 tty_flip_buffer_push(tty); 504 tty_flip_buffer_push(tty);
467 505
468 return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY; 506 return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY;
469} 507}
470 508
@@ -509,25 +547,25 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
509 return 1; 547 return 1;
510} 548}
511 549
512static irqreturn_t 550static irqreturn_t
513mpc52xx_uart_int(int irq, void *dev_id) 551mpc52xx_uart_int(int irq, void *dev_id)
514{ 552{
515 struct uart_port *port = dev_id; 553 struct uart_port *port = dev_id;
516 unsigned long pass = ISR_PASS_LIMIT; 554 unsigned long pass = ISR_PASS_LIMIT;
517 unsigned int keepgoing; 555 unsigned int keepgoing;
518 unsigned short status; 556 unsigned short status;
519 557
520 spin_lock(&port->lock); 558 spin_lock(&port->lock);
521 559
522 /* While we have stuff to do, we continue */ 560 /* While we have stuff to do, we continue */
523 do { 561 do {
524 /* If we don't find anything to do, we stop */ 562 /* If we don't find anything to do, we stop */
525 keepgoing = 0; 563 keepgoing = 0;
526 564
527 /* Read status */ 565 /* Read status */
528 status = in_be16(&PSC(port)->mpc52xx_psc_isr); 566 status = in_be16(&PSC(port)->mpc52xx_psc_isr);
529 status &= port->read_status_mask; 567 status &= port->read_status_mask;
530 568
531 /* Do we need to receive chars ? */ 569 /* Do we need to receive chars ? */
532 /* For this RX interrupts must be on and some chars waiting */ 570 /* For this RX interrupts must be on and some chars waiting */
533 if ( status & MPC52xx_PSC_IMR_RXRDY ) 571 if ( status & MPC52xx_PSC_IMR_RXRDY )
@@ -537,15 +575,15 @@ mpc52xx_uart_int(int irq, void *dev_id)
537 /* For this, TX must be ready and TX interrupt enabled */ 575 /* For this, TX must be ready and TX interrupt enabled */
538 if ( status & MPC52xx_PSC_IMR_TXRDY ) 576 if ( status & MPC52xx_PSC_IMR_TXRDY )
539 keepgoing |= mpc52xx_uart_int_tx_chars(port); 577 keepgoing |= mpc52xx_uart_int_tx_chars(port);
540 578
541 /* Limit number of iteration */ 579 /* Limit number of iteration */
542 if ( !(--pass) ) 580 if ( !(--pass) )
543 keepgoing = 0; 581 keepgoing = 0;
544 582
545 } while (keepgoing); 583 } while (keepgoing);
546 584
547 spin_unlock(&port->lock); 585 spin_unlock(&port->lock);
548 586
549 return IRQ_HANDLED; 587 return IRQ_HANDLED;
550} 588}
551 589
@@ -563,13 +601,18 @@ mpc52xx_console_get_options(struct uart_port *port,
563 struct mpc52xx_psc __iomem *psc = PSC(port); 601 struct mpc52xx_psc __iomem *psc = PSC(port);
564 unsigned char mr1; 602 unsigned char mr1;
565 603
604 pr_debug("mpc52xx_console_get_options(port=%p)\n", port);
605
566 /* Read the mode registers */ 606 /* Read the mode registers */
567 out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1); 607 out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
568 mr1 = in_8(&psc->mode); 608 mr1 = in_8(&psc->mode);
569 609
570 /* CT{U,L}R are write-only ! */ 610 /* CT{U,L}R are write-only ! */
571 *baud = __res.bi_baudrate ? 611 *baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
572 __res.bi_baudrate : CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD; 612#if !defined(CONFIG_PPC_MERGE)
613 if (__res.bi_baudrate)
614 *baud = __res.bi_baudrate;
615#endif
573 616
574 /* Parse them */ 617 /* Parse them */
575 switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) { 618 switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) {
@@ -579,26 +622,26 @@ mpc52xx_console_get_options(struct uart_port *port,
579 case MPC52xx_PSC_MODE_8_BITS: 622 case MPC52xx_PSC_MODE_8_BITS:
580 default: *bits = 8; 623 default: *bits = 8;
581 } 624 }
582 625
583 if (mr1 & MPC52xx_PSC_MODE_PARNONE) 626 if (mr1 & MPC52xx_PSC_MODE_PARNONE)
584 *parity = 'n'; 627 *parity = 'n';
585 else 628 else
586 *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e'; 629 *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e';
587} 630}
588 631
589static void 632static void
590mpc52xx_console_write(struct console *co, const char *s, unsigned int count) 633mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
591{ 634{
592 struct uart_port *port = &mpc52xx_uart_ports[co->index]; 635 struct uart_port *port = &mpc52xx_uart_ports[co->index];
593 struct mpc52xx_psc __iomem *psc = PSC(port); 636 struct mpc52xx_psc __iomem *psc = PSC(port);
594 unsigned int i, j; 637 unsigned int i, j;
595 638
596 /* Disable interrupts */ 639 /* Disable interrupts */
597 out_be16(&psc->mpc52xx_psc_imr, 0); 640 out_be16(&psc->mpc52xx_psc_imr, 0);
598 641
599 /* Wait the TX buffer to be empty */ 642 /* Wait the TX buffer to be empty */
600 j = 5000000; /* Maximum wait */ 643 j = 5000000; /* Maximum wait */
601 while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) && 644 while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
602 --j) 645 --j)
603 udelay(1); 646 udelay(1);
604 647
@@ -607,13 +650,13 @@ mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
607 /* Line return handling */ 650 /* Line return handling */
608 if (*s == '\n') 651 if (*s == '\n')
609 out_8(&psc->mpc52xx_psc_buffer_8, '\r'); 652 out_8(&psc->mpc52xx_psc_buffer_8, '\r');
610 653
611 /* Send the char */ 654 /* Send the char */
612 out_8(&psc->mpc52xx_psc_buffer_8, *s); 655 out_8(&psc->mpc52xx_psc_buffer_8, *s);
613 656
614 /* Wait the TX buffer to be empty */ 657 /* Wait the TX buffer to be empty */
615 j = 20000; /* Maximum wait */ 658 j = 20000; /* Maximum wait */
616 while (!(in_be16(&psc->mpc52xx_psc_status) & 659 while (!(in_be16(&psc->mpc52xx_psc_status) &
617 MPC52xx_PSC_SR_TXEMP) && --j) 660 MPC52xx_PSC_SR_TXEMP) && --j)
618 udelay(1); 661 udelay(1);
619 } 662 }
@@ -622,6 +665,7 @@ mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
622 out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); 665 out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
623} 666}
624 667
668#if !defined(CONFIG_PPC_MERGE)
625static int __init 669static int __init
626mpc52xx_console_setup(struct console *co, char *options) 670mpc52xx_console_setup(struct console *co, char *options)
627{ 671{
@@ -634,7 +678,7 @@ mpc52xx_console_setup(struct console *co, char *options)
634 678
635 if (co->index < 0 || co->index >= MPC52xx_PSC_MAXNUM) 679 if (co->index < 0 || co->index >= MPC52xx_PSC_MAXNUM)
636 return -EINVAL; 680 return -EINVAL;
637 681
638 /* Basic port init. Needed since we use some uart_??? func before 682 /* Basic port init. Needed since we use some uart_??? func before
639 * real init for early access */ 683 * real init for early access */
640 spin_lock_init(&port->lock); 684 spin_lock_init(&port->lock);
@@ -656,6 +700,78 @@ mpc52xx_console_setup(struct console *co, char *options)
656 return uart_set_options(port, co, baud, parity, bits, flow); 700 return uart_set_options(port, co, baud, parity, bits, flow);
657} 701}
658 702
703#else
704
705static int __init
706mpc52xx_console_setup(struct console *co, char *options)
707{
708 struct uart_port *port = &mpc52xx_uart_ports[co->index];
709 struct device_node *np = mpc52xx_uart_nodes[co->index];
710 unsigned int ipb_freq;
711 struct resource res;
712 int ret;
713
714 int baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
715 int bits = 8;
716 int parity = 'n';
717 int flow = 'n';
718
719 pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
720 co, co->index, options);
721
722 if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) {
723 pr_debug("PSC%x out of range\n", co->index);
724 return -EINVAL;
725 }
726
727 if (!np) {
728 pr_debug("PSC%x not found in device tree\n", co->index);
729 return -EINVAL;
730 }
731
732 pr_debug("Console on ttyPSC%x is %s\n",
733 co->index, mpc52xx_uart_nodes[co->index]->full_name);
734
735 /* Fetch register locations */
736 if ((ret = of_address_to_resource(np, 0, &res)) != 0) {
737 pr_debug("Could not get resources for PSC%x\n", co->index);
738 return ret;
739 }
740
741 /* Search for bus-frequency property in this node or a parent */
742 if ((ipb_freq = mpc52xx_find_ipb_freq(np)) == 0) {
743 pr_debug("Could not find IPB bus frequency!\n");
744 return -EINVAL;
745 }
746
747 /* Basic port init. Needed since we use some uart_??? func before
748 * real init for early access */
749 spin_lock_init(&port->lock);
750 port->uartclk = ipb_freq / 2;
751 port->ops = &mpc52xx_uart_ops;
752 port->mapbase = res.start;
753 port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc));
754 port->irq = irq_of_parse_and_map(np, 0);
755
756 if (port->membase == NULL)
757 return -EINVAL;
758
759 pr_debug("mpc52xx-psc uart at %lx, mapped to %p, irq=%x, freq=%i\n",
760 port->mapbase, port->membase, port->irq, port->uartclk);
761
762 /* Setup the port parameters accoding to options */
763 if (options)
764 uart_parse_options(options, &baud, &parity, &bits, &flow);
765 else
766 mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow);
767
768 pr_debug("Setting console parameters: %i %i%c1 flow=%c\n",
769 baud, bits, parity, flow);
770
771 return uart_set_options(port, co, baud, parity, bits, flow);
772}
773#endif /* defined(CONFIG_PPC_MERGE) */
774
659 775
660static struct uart_driver mpc52xx_uart_driver; 776static struct uart_driver mpc52xx_uart_driver;
661 777
@@ -669,10 +785,11 @@ static struct console mpc52xx_console = {
669 .data = &mpc52xx_uart_driver, 785 .data = &mpc52xx_uart_driver,
670}; 786};
671 787
672 788
673static int __init 789static int __init
674mpc52xx_console_init(void) 790mpc52xx_console_init(void)
675{ 791{
792 mpc52xx_uart_of_enumerate();
676 register_console(&mpc52xx_console); 793 register_console(&mpc52xx_console);
677 return 0; 794 return 0;
678} 795}
@@ -700,6 +817,7 @@ static struct uart_driver mpc52xx_uart_driver = {
700}; 817};
701 818
702 819
820#if !defined(CONFIG_PPC_MERGE)
703/* ======================================================================== */ 821/* ======================================================================== */
704/* Platform Driver */ 822/* Platform Driver */
705/* ======================================================================== */ 823/* ======================================================================== */
@@ -723,8 +841,6 @@ mpc52xx_uart_probe(struct platform_device *dev)
723 /* Init the port structure */ 841 /* Init the port structure */
724 port = &mpc52xx_uart_ports[idx]; 842 port = &mpc52xx_uart_ports[idx];
725 843
726 memset(port, 0x00, sizeof(struct uart_port));
727
728 spin_lock_init(&port->lock); 844 spin_lock_init(&port->lock);
729 port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */ 845 port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */
730 port->fifosize = 512; 846 port->fifosize = 512;
@@ -733,6 +849,7 @@ mpc52xx_uart_probe(struct platform_device *dev)
733 ( uart_console(port) ? 0 : UPF_IOREMAP ); 849 ( uart_console(port) ? 0 : UPF_IOREMAP );
734 port->line = idx; 850 port->line = idx;
735 port->ops = &mpc52xx_uart_ops; 851 port->ops = &mpc52xx_uart_ops;
852 port->dev = &dev->dev;
736 853
737 /* Search for IRQ and mapbase */ 854 /* Search for IRQ and mapbase */
738 for (i=0 ; i<dev->num_resources ; i++, res++) { 855 for (i=0 ; i<dev->num_resources ; i++, res++) {
@@ -771,7 +888,7 @@ mpc52xx_uart_suspend(struct platform_device *dev, pm_message_t state)
771{ 888{
772 struct uart_port *port = (struct uart_port *) platform_get_drvdata(dev); 889 struct uart_port *port = (struct uart_port *) platform_get_drvdata(dev);
773 890
774 if (sport) 891 if (port)
775 uart_suspend_port(&mpc52xx_uart_driver, port); 892 uart_suspend_port(&mpc52xx_uart_driver, port);
776 893
777 return 0; 894 return 0;
@@ -789,6 +906,7 @@ mpc52xx_uart_resume(struct platform_device *dev)
789} 906}
790#endif 907#endif
791 908
909
792static struct platform_driver mpc52xx_uart_platform_driver = { 910static struct platform_driver mpc52xx_uart_platform_driver = {
793 .probe = mpc52xx_uart_probe, 911 .probe = mpc52xx_uart_probe,
794 .remove = mpc52xx_uart_remove, 912 .remove = mpc52xx_uart_remove,
@@ -800,6 +918,184 @@ static struct platform_driver mpc52xx_uart_platform_driver = {
800 .name = "mpc52xx-psc", 918 .name = "mpc52xx-psc",
801 }, 919 },
802}; 920};
921#endif /* !defined(CONFIG_PPC_MERGE) */
922
923
924#if defined(CONFIG_PPC_MERGE)
925/* ======================================================================== */
926/* OF Platform Driver */
927/* ======================================================================== */
928
929static int __devinit
930mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
931{
932 int idx = -1;
933 unsigned int ipb_freq;
934 struct uart_port *port = NULL;
935 struct resource res;
936 int ret;
937
938 dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p, match=%p)\n", op, match);
939
940 /* Check validity & presence */
941 for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
942 if (mpc52xx_uart_nodes[idx] == op->node)
943 break;
944 if (idx >= MPC52xx_PSC_MAXNUM)
945 return -EINVAL;
946 pr_debug("Found %s assigned to ttyPSC%x\n",
947 mpc52xx_uart_nodes[idx]->full_name, idx);
948
949 /* Search for bus-frequency property in this node or a parent */
950 if ((ipb_freq = mpc52xx_find_ipb_freq(op->node)) == 0) {
951 dev_dbg(&op->dev, "Could not find IPB bus frequency!\n");
952 return -EINVAL;
953 }
954
955 /* Init the port structure */
956 port = &mpc52xx_uart_ports[idx];
957
958 spin_lock_init(&port->lock);
959 port->uartclk = ipb_freq / 2;
960 port->fifosize = 512;
961 port->iotype = UPIO_MEM;
962 port->flags = UPF_BOOT_AUTOCONF |
963 ( uart_console(port) ? 0 : UPF_IOREMAP );
964 port->line = idx;
965 port->ops = &mpc52xx_uart_ops;
966 port->dev = &op->dev;
967
968 /* Search for IRQ and mapbase */
969 if ((ret = of_address_to_resource(op->node, 0, &res)) != 0)
970 return ret;
971
972 port->mapbase = res.start;
973 port->irq = irq_of_parse_and_map(op->node, 0);
974
975 dev_dbg(&op->dev, "mpc52xx-psc uart at %lx, irq=%x, freq=%i\n",
976 port->mapbase, port->irq, port->uartclk);
977
978 if ((port->irq==NO_IRQ) || !port->mapbase) {
979 printk(KERN_ERR "Could not allocate resources for PSC\n");
980 return -EINVAL;
981 }
982
983 /* Add the port to the uart sub-system */
984 ret = uart_add_one_port(&mpc52xx_uart_driver, port);
985 if (!ret)
986 dev_set_drvdata(&op->dev, (void*)port);
987
988 return ret;
989}
990
991static int
992mpc52xx_uart_of_remove(struct of_device *op)
993{
994 struct uart_port *port = dev_get_drvdata(&op->dev);
995 dev_set_drvdata(&op->dev, NULL);
996
997 if (port)
998 uart_remove_one_port(&mpc52xx_uart_driver, port);
999
1000 return 0;
1001}
1002
1003#ifdef CONFIG_PM
1004static int
1005mpc52xx_uart_of_suspend(struct of_device *op, pm_message_t state)
1006{
1007 struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
1008
1009 if (port)
1010 uart_suspend_port(&mpc52xx_uart_driver, port);
1011
1012 return 0;
1013}
1014
1015static int
1016mpc52xx_uart_of_resume(struct of_device *op)
1017{
1018 struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
1019
1020 if (port)
1021 uart_resume_port(&mpc52xx_uart_driver, port);
1022
1023 return 0;
1024}
1025#endif
1026
1027static void
1028mpc52xx_uart_of_assign(struct device_node *np, int idx)
1029{
1030 int free_idx = -1;
1031 int i;
1032
1033 /* Find the first free node */
1034 for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
1035 if (mpc52xx_uart_nodes[i] == NULL) {
1036 free_idx = i;
1037 break;
1038 }
1039 }
1040
1041 if ((idx < 0) || (idx >= MPC52xx_PSC_MAXNUM))
1042 idx = free_idx;
1043
1044 if (idx < 0)
1045 return; /* No free slot; abort */
1046
1047 /* If the slot is already occupied, then swap slots */
1048 if (mpc52xx_uart_nodes[idx] && (free_idx != -1))
1049 mpc52xx_uart_nodes[free_idx] = mpc52xx_uart_nodes[idx];
1050 mpc52xx_uart_nodes[i] = np;
1051}
1052
1053static void
1054mpc52xx_uart_of_enumerate(void)
1055{
1056 static int enum_done = 0;
1057 struct device_node *np;
1058 const unsigned int *devno;
1059 int i;
1060
1061 if (enum_done)
1062 return;
1063
1064 for_each_node_by_type(np, "serial") {
1065 if (!of_match_node(mpc52xx_uart_of_match, np))
1066 continue;
1067
1068 /* Is a particular device number requested? */
1069 devno = get_property(np, "device_no", NULL);
1070 mpc52xx_uart_of_assign(of_node_get(np), devno ? *devno : -1);
1071 }
1072
1073 enum_done = 1;
1074
1075 for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
1076 if (mpc52xx_uart_nodes[i])
1077 pr_debug("%s assigned to ttyPSC%x\n",
1078 mpc52xx_uart_nodes[i]->full_name, i);
1079 }
1080}
1081
1082MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
1083
1084static struct of_platform_driver mpc52xx_uart_of_driver = {
1085 .owner = THIS_MODULE,
1086 .name = "mpc52xx-psc-uart",
1087 .match_table = mpc52xx_uart_of_match,
1088 .probe = mpc52xx_uart_of_probe,
1089 .remove = mpc52xx_uart_of_remove,
1090#ifdef CONFIG_PM
1091 .suspend = mpc52xx_uart_of_suspend,
1092 .resume = mpc52xx_uart_of_resume,
1093#endif
1094 .driver = {
1095 .name = "mpc52xx-psc-uart",
1096 },
1097};
1098#endif /* defined(CONFIG_PPC_MERGE) */
803 1099
804 1100
805/* ======================================================================== */ 1101/* ======================================================================== */
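
The probe path added in the hunk above pulls everything it needs from the device tree: the IPB bus frequency for the baud clock, the register window, and the interrupt. The fragment below is a reduced sketch of that lookup using only calls that appear in the hunk (mpc52xx_find_ipb_freq, of_address_to_resource, irq_of_parse_and_map); the helper name mpc52xx_uart_of_fill_port is hypothetical and the error reporting is trimmed, so treat it as an illustration rather than part of the patch.

/* Illustrative only: condensed from mpc52xx_uart_of_probe() above.
 * The helper name is hypothetical; the identifiers come from the hunk. */
static int mpc52xx_uart_of_fill_port(struct of_device *op, struct uart_port *port)
{
	struct resource res;
	unsigned int ipb_freq;
	int ret;

	/* The PSC baud generator runs at half the IPB bus frequency */
	ipb_freq = mpc52xx_find_ipb_freq(op->node);
	if (!ipb_freq)
		return -EINVAL;
	port->uartclk = ipb_freq / 2;

	/* Register window and IRQ both come from the "serial" node */
	ret = of_address_to_resource(op->node, 0, &res);
	if (ret)
		return ret;
	port->mapbase = res.start;
	port->irq = irq_of_parse_and_map(op->node, 0);

	if (port->irq == NO_IRQ || !port->mapbase)
		return -EINVAL;
	return 0;
}
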
@@ -811,22 +1107,45 @@ mpc52xx_uart_init(void)
811{ 1107{
812 int ret; 1108 int ret;
813 1109
814 printk(KERN_INFO "Serial: MPC52xx PSC driver\n"); 1110 printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n");
815 1111
816 ret = uart_register_driver(&mpc52xx_uart_driver); 1112 if ((ret = uart_register_driver(&mpc52xx_uart_driver)) != 0) {
817 if (ret == 0) { 1113 printk(KERN_ERR "%s: uart_register_driver failed (%i)\n",
818 ret = platform_driver_register(&mpc52xx_uart_platform_driver); 1114 __FILE__, ret);
819 if (ret) 1115 return ret;
820 uart_unregister_driver(&mpc52xx_uart_driver);
821 } 1116 }
822 1117
823 return ret; 1118#if defined(CONFIG_PPC_MERGE)
1119 mpc52xx_uart_of_enumerate();
1120
1121 ret = of_register_platform_driver(&mpc52xx_uart_of_driver);
1122 if (ret) {
1123 printk(KERN_ERR "%s: of_register_platform_driver failed (%i)\n",
1124 __FILE__, ret);
1125 uart_unregister_driver(&mpc52xx_uart_driver);
1126 return ret;
1127 }
1128#else
1129 ret = platform_driver_register(&mpc52xx_uart_platform_driver);
1130 if (ret) {
1131 printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
1132 __FILE__, ret);
1133 uart_unregister_driver(&mpc52xx_uart_driver);
1134 return ret;
1135 }
1136#endif
1137
1138 return 0;
824} 1139}
825 1140
826static void __exit 1141static void __exit
827mpc52xx_uart_exit(void) 1142mpc52xx_uart_exit(void)
828{ 1143{
1144#if defined(CONFIG_PPC_MERGE)
1145 of_unregister_platform_driver(&mpc52xx_uart_of_driver);
1146#else
829 platform_driver_unregister(&mpc52xx_uart_platform_driver); 1147 platform_driver_unregister(&mpc52xx_uart_platform_driver);
1148#endif
830 uart_unregister_driver(&mpc52xx_uart_driver); 1149 uart_unregister_driver(&mpc52xx_uart_driver);
831} 1150}
832 1151
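
Seen without the #ifdef interleaving, the module init flow introduced above is: register the UART core driver, enumerate the matching device-tree nodes into the ttyPSC table, then hook up the OF platform driver (or, on the legacy path, the platform driver) and roll back the UART registration if that fails. Below is a condensed, illustrative sketch of the CONFIG_PPC_MERGE branch only, using just the calls from the hunk above; the printk diagnostics of the real patch are omitted.

/* Condensed sketch of the CONFIG_PPC_MERGE init path shown above. */
static int __init mpc52xx_uart_init(void)
{
	int ret;

	ret = uart_register_driver(&mpc52xx_uart_driver);
	if (ret)
		return ret;

	/* Map "serial" nodes to ttyPSC indices before any port is probed */
	mpc52xx_uart_of_enumerate();

	ret = of_register_platform_driver(&mpc52xx_uart_of_driver);
	if (ret)
		uart_unregister_driver(&mpc52xx_uart_driver);

	return ret;
}
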
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c
index 39d9b20f2038..c2f601f8e4f2 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi_butterfly.c
@@ -23,6 +23,7 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/parport.h> 24#include <linux/parport.h>
25 25
26#include <linux/sched.h>
26#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
27#include <linux/spi/spi_bitbang.h> 28#include <linux/spi/spi_bitbang.h>
28#include <linux/spi/flash.h> 29#include <linux/spi/flash.h>
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index fdb33cd21a27..cb26c6df0583 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -34,6 +34,7 @@
34#include <asm/prom.h> 34#include <asm/prom.h>
35#include <asm/pgtable.h> 35#include <asm/pgtable.h>
36#include <asm/of_device.h> 36#include <asm/of_device.h>
37#include <asm/of_platform.h>
37 38
38#include "macmodes.h" 39#include "macmodes.h"
39#include "platinumfb.h" 40#include "platinumfb.h"
@@ -682,14 +683,14 @@ static int __init platinumfb_init(void)
682 return -ENODEV; 683 return -ENODEV;
683 platinumfb_setup(option); 684 platinumfb_setup(option);
684#endif 685#endif
685 of_register_driver(&platinum_driver); 686 of_register_platform_driver(&platinum_driver);
686 687
687 return 0; 688 return 0;
688} 689}
689 690
690static void __exit platinumfb_exit(void) 691static void __exit platinumfb_exit(void)
691{ 692{
692 of_unregister_driver(&platinum_driver); 693 of_unregister_platform_driver(&platinum_driver);
693} 694}
694 695
695MODULE_LICENSE("GPL"); 696MODULE_LICENSE("GPL");
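
The platinumfb change is a mechanical rename from the generic OF registration helpers to their platform-bus counterparts, plus the matching asm/of_platform.h include. The pairing, condensed from the hunk (platinum_driver is the driver object already defined in that file; the option parsing done in the real platinumfb_init() is left out):

/* Illustrative pairing only; the real init also parses the video= option. */
static int __init platinumfb_init(void)
{
	return of_register_platform_driver(&platinum_driver);
}

static void __exit platinumfb_exit(void)
{
	of_unregister_platform_driver(&platinum_driver);
}
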
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 5372cfcbd054..b022fffd8c51 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -24,6 +24,7 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
27#include <linux/sched.h>
27#include <linux/device.h> 28#include <linux/device.h>
28#include <linux/types.h> 29#include <linux/types.h>
29#include <linux/delay.h> 30#include <linux/delay.h>