-rw-r--r--	drivers/ide/Kconfig	8
-rw-r--r--	drivers/ide/Makefile	3
-rw-r--r--	drivers/ide/cmd64x.c	4
-rw-r--r--	drivers/ide/cy82c693.c	1
-rw-r--r--	drivers/ide/gayle.c	6
-rw-r--r--	drivers/ide/hpt366.c	8
-rw-r--r--	drivers/ide/ide-acpi.c	4
-rw-r--r--	drivers/ide/ide-cd.c	142
-rw-r--r--	drivers/ide/ide-cd.h	2
-rw-r--r--	drivers/ide/ide-dma-sff.c	54
-rw-r--r--	drivers/ide/ide-io.c	292
-rw-r--r--	drivers/ide/ide-ioctls.c	5
-rw-r--r--	drivers/ide/ide-iops.c	48
-rw-r--r--	drivers/ide/ide-legacy.c	58
-rw-r--r--	drivers/ide/ide-lib.c	105
-rw-r--r--	drivers/ide/ide-park.c	16
-rw-r--r--	drivers/ide/ide-pm.c	235
-rw-r--r--	drivers/ide/ide-probe.c	166
-rw-r--r--	drivers/ide/ide-proc.c	29
-rw-r--r--	drivers/ide/ide.c	89
-rw-r--r--	drivers/ide/pdc202xx_old.c	9
-rw-r--r--	drivers/ide/rz1000.c	36
-rw-r--r--	drivers/ide/trm290.c	4
-rw-r--r--	drivers/ide/tx4938ide.c	4
-rw-r--r--	drivers/ide/tx4939ide.c	10
-rw-r--r--	drivers/ide/umc8672.c	11
-rw-r--r--	drivers/scsi/ide-scsi.c	32
-rw-r--r--	include/linux/ide.h	37
28 files changed, 635 insertions, 783 deletions
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index e6857e01d1b..7a0a84b042c 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -62,6 +62,9 @@ config IDE_TIMINGS
 config IDE_ATAPI
 	bool
 
+config IDE_LEGACY
+	bool
+
 config BLK_DEV_IDE_SATA
 	bool "Support for SATA (deprecated; conflicts with libata SATA driver)"
 	default n
@@ -856,6 +859,7 @@ config BLK_DEV_4DRIVES
 config BLK_DEV_ALI14XX
 	tristate "ALI M14xx support"
 	select IDE_TIMINGS
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "ali14xx.probe" kernel
 	  boot parameter. It enables support for the secondary IDE interface
@@ -866,6 +870,7 @@ config BLK_DEV_ALI14XX
 
 config BLK_DEV_DTC2278
 	tristate "DTC-2278 support"
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "dtc2278.probe" kernel
 	  boot parameter. It enables support for the secondary IDE interface
@@ -876,6 +881,7 @@ config BLK_DEV_DTC2278
 config BLK_DEV_HT6560B
 	tristate "Holtek HT6560B support"
 	select IDE_TIMINGS
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "ht6560b.probe" kernel
 	  boot parameter. It enables support for the secondary IDE interface
@@ -886,6 +892,7 @@ config BLK_DEV_HT6560B
 config BLK_DEV_QD65XX
 	tristate "QDI QD65xx support"
 	select IDE_TIMINGS
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "qd65xx.probe" kernel
 	  boot parameter. It permits faster I/O speeds to be set. See the
@@ -894,6 +901,7 @@ config BLK_DEV_QD65XX
 
 config BLK_DEV_UMC8672
 	tristate "UMC-8672 support"
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "umc8672.probe" kernel
 	  boot parameter. It enables support for the secondary IDE interface
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 7818d402b18..177e3f8523e 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -5,7 +5,7 @@
 EXTRA_CFLAGS += -Idrivers/ide
 
 ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
-	ide-taskfile.o ide-park.o ide-pio-blacklist.o
+	ide-taskfile.o ide-pm.o ide-park.o ide-pio-blacklist.o
 
 # core IDE code
 ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o
@@ -15,6 +15,7 @@ ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
 ide-core-$(CONFIG_BLK_DEV_IDEDMA_SFF) += ide-dma-sff.o
 ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o
 ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
+ide-core-$(CONFIG_IDE_LEGACY) += ide-legacy.o
 
 obj-$(CONFIG_IDE) += ide-core.o
 
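
The Kconfig and Makefile hunks above add a hidden IDE_LEGACY symbol that the VLB-era host drivers (ali14xx, dtc2278, ht6560b, qd65xx, umc8672) select, so the shared probing helper in ide-legacy.c is linked into ide-core only when at least one of them is enabled. Presumably each of those drivers then calls the new ide_legacy_device_add() helper (added at the end of this patch) from its probe path; the call sites themselves are not part of this diff, so the sketch below is illustrative only and the example_* names are invented:

	/* hypothetical probe snippet for a legacy VLB-style host driver */
	static const struct ide_port_info example_legacy_port_info = {
		.name		= "example",
		.chipset	= ide_generic,		/* copied into hw->chipset */
		.host_flags	= IDE_HFLAG_NO_DMA,
	};

	static int __init example_legacy_init(void)
	{
		/* second argument is the driver-private "config" word, 0 if unused */
		return ide_legacy_device_add(&example_legacy_port_info, 0);
	}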
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index 935385c77e0..3623bf013bc 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -424,10 +424,10 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
 		.name		= DRV_NAME,
 		.init_chipset	= init_chipset_cmd64x,
 		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
-		.chipset	= ide_cmd646,
 		.port_ops	= &cmd64x_port_ops,
 		.dma_ops	= &cmd648_dma_ops,
-		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
+		.host_flags	= IDE_HFLAG_SERIALIZE |
+				  IDE_HFLAG_ABUSE_PREFETCH,
 		.pio_mask	= ATA_PIO5,
 		.mwdma_mask	= ATA_MWDMA2,
 		.udma_mask	= ATA_UDMA2,
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 5297f07d293..d37baf8ecc5 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -292,7 +292,6 @@ static const struct ide_port_info cy82c693_chipset __devinitdata = {
 	.name		= DRV_NAME,
 	.init_iops	= init_iops_cy82c693,
 	.port_ops	= &cy82c693_port_ops,
-	.chipset	= ide_cy82c693,
 	.host_flags	= IDE_HFLAG_SINGLE,
 	.pio_mask	= ATA_PIO4,
 	.swdma_mask	= ATA_SWDMA2,
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index 69150688656..59bd0be9dcb 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -117,6 +117,10 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
 	hw->chipset = ide_generic;
 }
 
+static const struct ide_port_info gayle_port_info = {
+	.host_flags	= IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA,
+};
+
 /*
  * Probe for a Gayle IDE interface (and optionally for an IDE doubler)
  */
@@ -178,7 +182,7 @@ found:
 		hws[i] = &hw[i];
 	}
 
-	rc = ide_host_add(NULL, hws, NULL);
+	rc = ide_host_add(&gayle_port_info, hws, NULL);
 	if (rc)
 		release_mem_region(res_start, res_n);
 
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index f5afd46ed51..b18e10d99d2 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -135,7 +135,6 @@
 /* various tuning parameters */
 #define HPT_RESET_STATE_ENGINE
 #undef HPT_DELAY_INTERRUPT
-#define HPT_SERIALIZE_IO	0
 
 static const char *quirk_drives[] = {
 	"QUANTUM FIREBALLlct08 08",
@@ -1288,7 +1287,6 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
 static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
 {
 	struct hpt_info *info = hpt3xx_get_info(hwif->dev);
-	int serialize = HPT_SERIALIZE_IO;
 	u8 chip_type = info->chip_type;
 
 	/* Cache the channel's MISC. control registers' offset */
@@ -1305,13 +1303,9 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
 		 * Clock is shared between the channels,
 		 * so we'll have to serialize them... :-(
 		 */
-		serialize = 1;
+		hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
 		hwif->rw_disk = &hpt3xxn_rw_disk;
 	}
-
-	/* Serialize access to this device if needed */
-	if (serialize && hwif->mate)
-		hwif->serialized = hwif->mate->serialized = 1;
 }
 
 static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
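
cmd64x.c and hpt366.c now request channel serialization declaratively through IDE_HFLAG_SERIALIZE instead of poking hwif->serialized (or the local HPT_SERIALIZE_IO knob) by hand, and gayle.c above does the same through a new ide_port_info. A hedged sketch of how a host driver states this; the struct fields mirror the hunks above, everything else is illustrative:

	static const struct ide_port_info example_port_info = {
		.name		= "example",
		/* both channels share timing/clock state, so serialize them */
		.host_flags	= IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA,
	};

	/* later, in the probe path, the core consumes host_flags: */
	/* rc = ide_host_add(&example_port_info, hws, NULL); */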
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 244a8a052ce..fd4a3643305 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -615,10 +615,10 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
 	in_params[0].buffer.length = sizeof(struct GTM_buffer);
 	in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm;
 	in_params[1].type = ACPI_TYPE_BUFFER;
-	in_params[1].buffer.length = sizeof(ATA_ID_WORDS * 2);
+	in_params[1].buffer.length = ATA_ID_WORDS * 2;
 	in_params[1].buffer.pointer = (u8 *)&master->idbuff;
 	in_params[2].type = ACPI_TYPE_BUFFER;
-	in_params[2].buffer.length = sizeof(ATA_ID_WORDS * 2);
+	in_params[2].buffer.length = ATA_ID_WORDS * 2;
 	in_params[2].buffer.pointer = (u8 *)&slave->idbuff;
 	/* Output buffer: _STM has no output */
 
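
The ide-acpi.c hunk fixes a sizeof() slip: sizeof(ATA_ID_WORDS * 2) is the size of an int-valued expression (typically 4), not the 512-byte identify buffer the ACPI _STM method expects, so the buffer length passed in was wrong. A minimal user-space sketch of the difference, assuming ATA_ID_WORDS is 256 as defined in <linux/ata.h>:

	#include <stdio.h>

	#define ATA_ID_WORDS	256	/* same value as in <linux/ata.h> */

	int main(void)
	{
		/* the buggy length: size of the integer expression itself */
		printf("sizeof(ATA_ID_WORDS * 2) = %zu\n", sizeof(ATA_ID_WORDS * 2));
		/* the intended length: 256 words * 2 bytes = 512 */
		printf("ATA_ID_WORDS * 2         = %d\n", ATA_ID_WORDS * 2);
		return 0;
	}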
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 42ab6d8715f..5daa4dd1b01 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -262,7 +262,6 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
 	struct request *failed = (struct request *) rq->buffer;
 	struct cdrom_info *info = drive->driver_data;
 	void *sense = &info->sense_data;
-	unsigned long flags;
 
 	if (failed) {
 		if (failed->sense) {
@@ -278,11 +277,9 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
 					     failed->hard_nr_sectors))
 				BUG();
 		} else {
-			spin_lock_irqsave(&ide_lock, flags);
-			if (__blk_end_request(failed, -EIO,
-					      failed->data_len))
+			if (blk_end_request(failed, -EIO,
+					    failed->data_len))
 				BUG();
-			spin_unlock_irqrestore(&ide_lock, flags);
 		}
 	} else
 		cdrom_analyze_sense_data(drive, NULL, sense);
@@ -317,7 +314,8 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
 static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = hwif->hwgroup->rq;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 	int stat, err, sense_key;
 
 	/* check for errors */
@@ -426,16 +424,17 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 			if (time_after(jiffies, info->write_timeout))
 				do_end_request = 1;
 			else {
+				struct request_queue *q = drive->queue;
 				unsigned long flags;
 
 				/*
 				 * take a breather relying on the unplug
 				 * timer to kick us again
 				 */
-				spin_lock_irqsave(&ide_lock, flags);
-				blk_plug_device(drive->queue);
-				spin_unlock_irqrestore(&ide_lock,
-						       flags);
+				spin_lock_irqsave(q->queue_lock, flags);
+				blk_plug_device(q);
+				spin_unlock_irqrestore(q->queue_lock, flags);
+
 				return 1;
 			}
 		}
@@ -504,12 +503,14 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 
 end_request:
 	if (stat & ATA_ERR) {
+		struct request_queue *q = drive->queue;
 		unsigned long flags;
 
-		spin_lock_irqsave(&ide_lock, flags);
+		spin_lock_irqsave(q->queue_lock, flags);
 		blkdev_dequeue_request(rq);
-		HWGROUP(drive)->rq = NULL;
-		spin_unlock_irqrestore(&ide_lock, flags);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+
+		hwgroup->rq = NULL;
 
 		cdrom_queue_request_sense(drive, rq->sense, rq);
 	} else
@@ -773,52 +774,6 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
 	return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
 }
 
-#define IDECD_SEEK_THRESHOLD	(1000)			/* 1000 blocks */
-#define IDECD_SEEK_TIMER	(5 * WAIT_MIN_SLEEP)	/* 100 ms */
-#define IDECD_SEEK_TIMEOUT	(2 * WAIT_CMD)		/* 20 sec */
-
-static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
-{
-	struct cdrom_info *info = drive->driver_data;
-	int stat;
-	static int retry = 10;
-
-	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
-
-	if (cdrom_decode_status(drive, 0, &stat))
-		return ide_stopped;
-
-	drive->atapi_flags |= IDE_AFLAG_SEEKING;
-
-	if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
-		if (--retry == 0)
-			drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
-	}
-	return ide_stopped;
-}
-
-static void ide_cd_prepare_seek_request(ide_drive_t *drive, struct request *rq)
-{
-	sector_t frame = rq->sector;
-
-	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
-
-	sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
-
-	memset(rq->cmd, 0, BLK_MAX_CDB);
-	rq->cmd[0] = GPCMD_SEEK;
-	put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
-
-	rq->timeout = ATAPI_WAIT_PC;
-}
-
-static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
-{
-	struct request *rq = drive->hwif->hwgroup->rq;
-
-	return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
-}
-
 /*
  * Fix up a possibly partially-processed request so that we can start it over
  * entirely, or even put it back on the request queue.
@@ -950,7 +905,8 @@ static int cdrom_newpc_intr_dummy_cb(struct request *rq)
 static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = HWGROUP(drive)->rq;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 	xfer_func_t *xferfunc;
 	ide_expiry_t *expiry = NULL;
 	int dma_error = 0, dma, stat, thislen, uptodate = 0;
@@ -1148,17 +1104,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 
 end_request:
 	if (blk_pc_request(rq)) {
-		unsigned long flags;
 		unsigned int dlen = rq->data_len;
 
 		if (dma)
 			rq->data_len = 0;
 
-		spin_lock_irqsave(&ide_lock, flags);
-		if (__blk_end_request(rq, 0, dlen))
+		if (blk_end_request(rq, 0, dlen))
 			BUG();
-		HWGROUP(drive)->rq = NULL;
-		spin_unlock_irqrestore(&ide_lock, flags);
+
+		hwgroup->rq = NULL;
 	} else {
 		if (!uptodate)
 			rq->cmd_flags |= REQ_FAILED;
@@ -1260,7 +1214,6 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 					sector_t block)
 {
-	struct cdrom_info *info = drive->driver_data;
 	ide_handler_t *fn;
 	int xferlen;
 
@@ -1270,44 +1223,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 		      (unsigned long long)block);
 
 	if (blk_fs_request(rq)) {
-		if (drive->atapi_flags & IDE_AFLAG_SEEKING) {
-			ide_hwif_t *hwif = drive->hwif;
-			unsigned long elapsed = jiffies - info->start_seek;
-			int stat = hwif->tp_ops->read_status(hwif);
-
-			if ((stat & ATA_DSC) != ATA_DSC) {
-				if (elapsed < IDECD_SEEK_TIMEOUT) {
-					ide_stall_queue(drive,
-							IDECD_SEEK_TIMER);
-					return ide_stopped;
-				}
-				printk(KERN_ERR PFX "%s: DSC timeout\n",
-				       drive->name);
-			}
-			drive->atapi_flags &= ~IDE_AFLAG_SEEKING;
-		}
-		if (rq_data_dir(rq) == READ &&
-		    IDE_LARGE_SEEK(info->last_block, block,
-				   IDECD_SEEK_THRESHOLD) &&
-		    (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)) {
-			xferlen = 0;
-			fn = cdrom_start_seek_continuation;
+		xferlen = 32768;
+		fn = cdrom_start_rw_cont;
 
-			drive->dma = 0;
-			info->start_seek = jiffies;
-
-			ide_cd_prepare_seek_request(drive, rq);
-		} else {
-			xferlen = 32768;
-			fn = cdrom_start_rw_cont;
-
-			if (cdrom_start_rw(drive, rq) == ide_stopped)
-				return ide_stopped;
+		if (cdrom_start_rw(drive, rq) == ide_stopped)
+			return ide_stopped;
 
-			if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped)
-				return ide_stopped;
-		}
-		info->last_block = block;
+		if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped)
+			return ide_stopped;
 	} else if (blk_sense_request(rq) || blk_pc_request(rq) ||
 		   rq->cmd_type == REQ_TYPE_ATA_PC) {
 		xferlen = rq->data_len;
@@ -1908,13 +1831,6 @@ static ide_proc_entry_t idecd_proc[] = {
 	{ NULL, 0, NULL, NULL }
 };
 
-ide_devset_rw_flag(dsc_overlap, IDE_DFLAG_DSC_OVERLAP);
-
-static const struct ide_proc_devset idecd_settings[] = {
-	IDE_PROC_DEVSET(dsc_overlap, 0, 1),
-	{ 0 },
-};
-
 static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
 {
 	return idecd_proc;
@@ -1922,7 +1838,7 @@ static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
 
 static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive)
 {
-	return idecd_settings;
+	return NULL;
 }
 #endif
 
@@ -2022,11 +1938,6 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 	/* set correct block size */
 	blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
 
-	if (drive->next != drive)
-		drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
-	else
-		drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
-
 	if (ide_cdrom_register(drive, nslots)) {
 		printk(KERN_ERR PFX "%s: %s failed to register device with the"
 		       " cdrom driver.\n", drive->name, __func__);
@@ -2063,7 +1974,6 @@ static void ide_cd_release(struct kref *kref)
 	kfree(info->toc);
 	if (devinfo->handle == drive)
 		unregister_cdrom(devinfo);
-	drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
 	drive->driver_data = NULL;
 	blk_queue_prep_rq(drive->queue, NULL);
 	g->private_data = NULL;
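
Several ide-cd.c hunks above replace __blk_end_request() called under the global ide_lock with plain blk_end_request(). The relevant block-layer convention in this kernel generation is that __blk_end_request() expects the caller to already hold the queue lock, while blk_end_request() acquires and releases it internally, which is what lets the explicit spin_lock_irqsave() pairs disappear. A fragment showing the before/after shape, with the surrounding ide-cd context elided:

	/* before: caller supplies the locking */
	spin_lock_irqsave(&ide_lock, flags);
	if (__blk_end_request(failed, -EIO, failed->data_len))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);

	/* after: blk_end_request() takes the queue lock itself */
	if (blk_end_request(failed, -EIO, failed->data_len))
		BUG();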
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 5882b9a9ea8..d5ce3362dbd 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -88,8 +88,6 @@ struct cdrom_info {
 	struct request_sense sense_data;
 
 	struct request request_sense_request;
-	unsigned long last_block;
-	unsigned long start_seek;
 
 	u8 max_speed;		/* Max speed of the drive. */
 	u8 current_speed;	/* Current speed of the drive. */
diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c
index cac431f0df1..f6d2d44d8a9 100644
--- a/drivers/ide/ide-dma-sff.c
+++ b/drivers/ide/ide-dma-sff.c
@@ -98,10 +98,10 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	__le32 *table = (__le32 *)hwif->dmatable_cpu;
-	unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
 	unsigned int count = 0;
 	int i;
 	struct scatterlist *sg;
+	u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
 
 	hwif->sg_nents = ide_build_sglist(drive, rq);
 	if (hwif->sg_nents == 0)
@@ -176,15 +176,10 @@ int ide_dma_setup(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = hwif->hwgroup->rq;
-	unsigned int reading;
+	unsigned int reading = rq_data_dir(rq) ? 0 : ATA_DMA_WR;
 	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
 	u8 dma_stat;
 
-	if (rq_data_dir(rq))
-		reading = 0;
-	else
-		reading = 1 << 3;
-
 	/* fall back to pio! */
 	if (!ide_build_dmatable(drive, rq)) {
 		ide_map_sg(drive, rq);
@@ -209,10 +204,11 @@ int ide_dma_setup(ide_drive_t *drive)
 
 	/* clear INTR & ERROR flags */
 	if (mmio)
-		writeb(dma_stat | 6,
+		writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
 		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
 	else
-		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
+		outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
+		     hwif->dma_base + ATA_DMA_STATUS);
 
 	drive->waiting_for_dma = 1;
 	return 0;
@@ -246,14 +242,13 @@ static int dma_timer_expiry(ide_drive_t *drive)
 
 	hwif->hwgroup->expiry = NULL;	/* one free ride for now */
 
-	/* 1 dmaing, 2 error, 4 intr */
-	if (dma_stat & 2)			/* ERROR */
+	if (dma_stat & ATA_DMA_ERR)		/* ERROR */
 		return -1;
 
-	if (dma_stat & 1)			/* DMAing */
+	if (dma_stat & ATA_DMA_ACTIVE)		/* DMAing */
 		return WAIT_CMD;
 
-	if (dma_stat & 4)			/* Got an Interrupt */
+	if (dma_stat & ATA_DMA_INTR)		/* Got an Interrupt */
 		return WAIT_CMD;
 
 	return 0;	/* Status is unknown -- reset the bus */
@@ -279,12 +274,11 @@ void ide_dma_start(ide_drive_t *drive)
 	 */
 	if (hwif->host_flags & IDE_HFLAG_MMIO) {
 		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-		/* start DMA */
-		writeb(dma_cmd | 1,
+		writeb(dma_cmd | ATA_DMA_START,
 		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
 	} else {
 		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-		outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
+		outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
 	}
 
 	wmb();
@@ -296,19 +290,18 @@ int ide_dma_end(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-	u8 dma_stat = 0, dma_cmd = 0;
+	u8 dma_stat = 0, dma_cmd = 0, mask;
 
 	drive->waiting_for_dma = 0;
 
+	/* stop DMA */
 	if (mmio) {
-		/* get DMA command mode */
 		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-		/* stop DMA */
-		writeb(dma_cmd & ~1,
+		writeb(dma_cmd & ~ATA_DMA_START,
 		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
 	} else {
 		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-		outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
+		outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
 	}
 
 	/* get DMA status */
@@ -316,16 +309,21 @@ int ide_dma_end(ide_drive_t *drive)
 
 	if (mmio)
 		/* clear the INTR & ERROR bits */
-		writeb(dma_stat | 6,
+		writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
 		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
 	else
-		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
+		outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
+		     hwif->dma_base + ATA_DMA_STATUS);
 
 	/* purge DMA mappings */
 	ide_destroy_dmatable(drive);
-	/* verify good DMA status */
 	wmb();
-	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
+
+	/* verify good DMA status */
+	mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR;
+	if ((dma_stat & mask) != ATA_DMA_INTR)
+		return 0x10 | dma_stat;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_dma_end);
 
@@ -335,11 +333,7 @@ int ide_dma_test_irq(ide_drive_t *drive)
 	ide_hwif_t *hwif = drive->hwif;
 	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
 
-	/* return 1 if INTR asserted */
-	if ((dma_stat & 4) == 4)
-		return 1;
-
-	return 0;
+	return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
 }
 EXPORT_SYMBOL_GPL(ide_dma_test_irq);
 
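
The ide-dma-sff.c changes spell the bus-master (SFF-8038i) status and command bits with the named ATA_DMA_* constants instead of raw 1/2/4/6 values; the final status check "(dma_stat & 7) != 4" becomes a test that only the interrupt bit is set once the transfer has stopped cleanly. A small stand-alone sketch of that equivalence, with the bit values assumed to follow the usual SFF-8038i layout used by the kernel definitions:

	#include <stdio.h>

	/* assumed values: ACTIVE = bit 0, ERR = bit 1, INTR = bit 2 */
	#define ATA_DMA_ACTIVE	(1 << 0)
	#define ATA_DMA_ERR	(1 << 1)
	#define ATA_DMA_INTR	(1 << 2)

	/* old form: (dma_stat & 7) == 4; new form: same test with named bits */
	static int dma_ended_ok(unsigned char dma_stat)
	{
		unsigned char mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR;

		return (dma_stat & mask) == ATA_DMA_INTR;
	}

	int main(void)
	{
		/* 0x04 (INTR only) is a clean end; 0x06 (INTR+ERR) is not */
		printf("0x04 -> %d, 0x06 -> %d\n", dma_ended_ok(0x04), dma_ended_ok(0x06));
		return 0;
	}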
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index cc35d6dbd41..ecacc008fda 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -84,11 +84,11 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 			ide_dma_on(drive);
 	}
 
-	if (!__blk_end_request(rq, error, nr_bytes)) {
-		if (dequeue)
-			HWGROUP(drive)->rq = NULL;
+	if (!blk_end_request(rq, error, nr_bytes))
 		ret = 0;
-	}
+
+	if (ret == 0 && dequeue)
+		drive->hwif->hwgroup->rq = NULL;
 
 	return ret;
 }
@@ -107,16 +107,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
 	unsigned int nr_bytes = nr_sectors << 9;
-	struct request *rq;
-	unsigned long flags;
-	int ret = 1;
-
-	/*
-	 * room for locking improvements here, the calls below don't
-	 * need the queue lock held at all
-	 */
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
+	struct request *rq = drive->hwif->hwgroup->rq;
 
 	if (!nr_bytes) {
 		if (blk_pc_request(rq))
@@ -125,105 +116,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 			nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
-	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
-
-	spin_unlock_irqrestore(&ide_lock, flags);
-	return ret;
+	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
 }
 EXPORT_SYMBOL(ide_end_request);
 
-static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-
-#ifdef DEBUG_PM
-	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
-	       drive->name, pm->pm_step);
-#endif
-	if (drive->media != ide_disk)
-		return;
-
-	switch (pm->pm_step) {
-	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
-		if (pm->pm_state == PM_EVENT_FREEZE)
-			pm->pm_step = IDE_PM_COMPLETED;
-		else
-			pm->pm_step = IDE_PM_STANDBY;
-		break;
-	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
-		pm->pm_step = IDE_PM_COMPLETED;
-		break;
-	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
-		pm->pm_step = IDE_PM_IDLE;
-		break;
-	case IDE_PM_IDLE:		/* Resume step 2 (idle)*/
-		pm->pm_step = IDE_PM_RESTORE_DMA;
-		break;
-	}
-}
-
-static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-	ide_task_t *args = rq->special;
-
-	memset(args, 0, sizeof(*args));
-
-	switch (pm->pm_step) {
-	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
-		if (drive->media != ide_disk)
-			break;
-		/* Not supported? Switch to next step now. */
-		if (ata_id_flush_enabled(drive->id) == 0 ||
-		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
-			ide_complete_power_step(drive, rq);
-			return ide_stopped;
-		}
-		if (ata_id_flush_ext_enabled(drive->id))
-			args->tf.command = ATA_CMD_FLUSH_EXT;
-		else
-			args->tf.command = ATA_CMD_FLUSH;
-		goto out_do_tf;
-	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
-		args->tf.command = ATA_CMD_STANDBYNOW1;
-		goto out_do_tf;
-	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
-		ide_set_max_pio(drive);
-		/*
-		 * skip IDE_PM_IDLE for ATAPI devices
-		 */
-		if (drive->media != ide_disk)
-			pm->pm_step = IDE_PM_RESTORE_DMA;
-		else
-			ide_complete_power_step(drive, rq);
-		return ide_stopped;
-	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
-		args->tf.command = ATA_CMD_IDLEIMMEDIATE;
-		goto out_do_tf;
-	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
-		/*
-		 * Right now, all we do is call ide_set_dma(drive),
-		 * we could be smarter and check for current xfer_speed
-		 * in struct drive etc...
-		 */
-		if (drive->hwif->dma_ops == NULL)
-			break;
-		/*
-		 * TODO: respect IDE_DFLAG_USING_DMA
-		 */
-		ide_set_dma(drive);
-		break;
-	}
-
-	pm->pm_step = IDE_PM_COMPLETED;
-	return ide_stopped;
-
-out_do_tf:
-	args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-	args->data_phase = TASKFILE_NO_DATA;
-	return do_rw_taskfile(drive, args);
-}
-
 /**
  * ide_end_dequeued_request - complete an IDE I/O
  * @drive: IDE device for the I/O
@@ -242,48 +138,12 @@ out_do_tf:
 int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
 			     int uptodate, int nr_sectors)
 {
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&ide_lock, flags);
 	BUG_ON(!blk_rq_started(rq));
-	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
-	spin_unlock_irqrestore(&ide_lock, flags);
 
-	return ret;
+	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
 }
 EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
 
-
-/**
- * ide_complete_pm_request - end the current Power Management request
- * @drive: target drive
- * @rq: request
- *
- * This function cleans up the current PM request and stops the queue
- * if necessary.
- */
-static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
-{
-	unsigned long flags;
-
-#ifdef DEBUG_PM
-	printk("%s: completing PM request, %s\n", drive->name,
-	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
-#endif
-	spin_lock_irqsave(&ide_lock, flags);
-	if (blk_pm_suspend_request(rq)) {
-		blk_stop_queue(drive->queue);
-	} else {
-		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-		blk_start_queue(drive->queue);
-	}
-	HWGROUP(drive)->rq = NULL;
-	if (__blk_end_request(rq, 0, 0))
-		BUG();
-	spin_unlock_irqrestore(&ide_lock, flags);
-}
-
 /**
  * ide_end_drive_cmd - end an explicit drive command
  * @drive: command
@@ -300,19 +160,12 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 
 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 {
-	unsigned long flags;
-	struct request *rq;
-
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
-	spin_unlock_irqrestore(&ide_lock, flags);
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *task = (ide_task_t *)rq->special;
 
-		if (rq->errors == 0)
-			rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);
-
 		if (task) {
 			struct ide_taskfile *tf = &task->tf;
 
@@ -333,15 +186,14 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 		return;
 	}
 
-	spin_lock_irqsave(&ide_lock, flags);
-	HWGROUP(drive)->rq = NULL;
+	hwgroup->rq = NULL;
+
 	rq->errors = err;
-	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
-				       blk_rq_bytes(rq))))
+
+	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
+				     blk_rq_bytes(rq))))
 		BUG();
-	spin_unlock_irqrestore(&ide_lock, flags);
 }
-
 EXPORT_SYMBOL(ide_end_drive_cmd);
 
 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
@@ -720,40 +572,6 @@ static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
 	}
 }
 
-static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-
-	if (blk_pm_suspend_request(rq) &&
-	    pm->pm_step == IDE_PM_START_SUSPEND)
-		/* Mark drive blocked when starting the suspend sequence. */
-		drive->dev_flags |= IDE_DFLAG_BLOCKED;
-	else if (blk_pm_resume_request(rq) &&
-		 pm->pm_step == IDE_PM_START_RESUME) {
-		/*
-		 * The first thing we do on wakeup is to wait for BSY bit to
-		 * go away (with a looong timeout) as a drive on this hwif may
-		 * just be POSTing itself.
-		 * We do that before even selecting as the "other" device on
-		 * the bus may be broken enough to walk on our toes at this
-		 * point.
-		 */
-		ide_hwif_t *hwif = drive->hwif;
-		int rc;
-#ifdef DEBUG_PM
-		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
-#endif
-		rc = ide_wait_not_busy(hwif, 35000);
-		if (rc)
-			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
-		SELECT_DRIVE(drive);
-		hwif->tp_ops->set_irq(hwif, 1);
-		rc = ide_wait_not_busy(hwif, 100000);
-		if (rc)
-			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
-	}
-}
-
 /**
  * start_request - start of I/O and command issuing for IDE
  *
@@ -927,7 +745,7 @@ repeat:
 
 /*
  * Issue a new request to a drive from hwgroup
- * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
+ * Caller must have already done spin_lock_irqsave(&hwgroup->lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces.  Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
@@ -939,7 +757,7 @@ repeat:
 * possibly along with many other devices.  This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
- * The IDE driver uses the single global ide_lock spinlock to protect
+ * The IDE driver uses a per-hwgroup spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
@@ -955,7 +773,7 @@ repeat:
 * will start the next request from the queue.  If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
- * The ide_lock (spinlock) is used to protect all access to the
+ * The per-hwgroup spinlock is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver.  This makes the driver much more friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
@@ -968,7 +786,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 	ide_startstop_t	startstop;
 	int loops = 0;
 
-	/* caller must own ide_lock */
+	/* caller must own hwgroup->lock */
 	BUG_ON(!irqs_disabled());
 
 	while (!hwgroup->busy) {
@@ -1023,12 +841,12 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		}
 	again:
 		hwif = HWIF(drive);
-		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
+		if (hwif != hwgroup->hwif) {
 			/*
 			 * set nIEN for previous hwif, drives in the
 			 * quirk_list may not like intr setups/cleanups
 			 */
-			if (drive->quirk_list != 1)
+			if (drive->quirk_list == 0)
 				hwif->tp_ops->set_irq(hwif, 0);
 		}
 		hwgroup->hwif = hwif;
@@ -1036,11 +854,6 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 		drive->service_start = jiffies;
 
-		if (blk_queue_plugged(drive->queue)) {
-			printk(KERN_ERR "ide: huh? queue was plugged!\n");
-			break;
-		}
-
 		/*
 		 * we know that the queue isn't empty, but this can happen
 		 * if the q->prep_rq_fn() decides to kill a request
@@ -1090,11 +903,11 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		 */
 		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
 			disable_irq_nosync(hwif->irq);
-		spin_unlock(&ide_lock);
+		spin_unlock(&hwgroup->lock);
 		local_irq_enable_in_hardirq();
 		/* allow other IRQs while we start this request */
 		startstop = start_request(drive, rq);
-		spin_lock_irq(&ide_lock);
+		spin_lock_irq(&hwgroup->lock);
 		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
 			enable_irq(hwif->irq);
 		if (startstop == ide_stopped)
@@ -1192,7 +1005,7 @@ void ide_timer_expiry (unsigned long data)
 	unsigned long	flags;
 	unsigned long	wait = -1;
 
-	spin_lock_irqsave(&ide_lock, flags);
+	spin_lock_irqsave(&hwgroup->lock, flags);
 
 	if (((handler = hwgroup->handler) == NULL) ||
 	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
@@ -1225,7 +1038,7 @@ void ide_timer_expiry (unsigned long data)
 			hwgroup->timer.expires = jiffies + wait;
 			hwgroup->req_gen_timer = hwgroup->req_gen;
 			add_timer(&hwgroup->timer);
-			spin_unlock_irqrestore(&ide_lock, flags);
+			spin_unlock_irqrestore(&hwgroup->lock, flags);
 			return;
 		}
 	}
@@ -1235,7 +1048,7 @@ void ide_timer_expiry (unsigned long data)
 		 * the handler() function, which means we need to
 		 * globally mask the specific IRQ:
 		 */
-		spin_unlock(&ide_lock);
+		spin_unlock(&hwgroup->lock);
 		hwif = HWIF(drive);
 		/* disable_irq_nosync ?? */
 		disable_irq(hwif->irq);
@@ -1259,14 +1072,14 @@ void ide_timer_expiry (unsigned long data)
 					       hwif->tp_ops->read_status(hwif));
 			}
 			drive->service_time = jiffies - drive->service_start;
-			spin_lock_irq(&ide_lock);
+			spin_lock_irq(&hwgroup->lock);
 			enable_irq(hwif->irq);
 			if (startstop == ide_stopped)
 				hwgroup->busy = 0;
 		}
 	}
 	ide_do_request(hwgroup, IDE_NO_IRQ);
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
 }
 
 /**
@@ -1359,18 +1172,16 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
-	ide_hwif_t *hwif;
+	ide_hwif_t *hwif = hwgroup->hwif;
 	ide_drive_t *drive;
 	ide_handler_t *handler;
 	ide_startstop_t startstop;
+	irqreturn_t irq_ret = IRQ_NONE;
 
-	spin_lock_irqsave(&ide_lock, flags);
-	hwif = hwgroup->hwif;
+	spin_lock_irqsave(&hwgroup->lock, flags);
 
-	if (!ide_ack_intr(hwif)) {
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
-	}
+	if (!ide_ack_intr(hwif))
+		goto out;
 
 	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
 		/*
@@ -1406,9 +1217,9 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 			(void)hwif->tp_ops->read_status(hwif);
 #endif /* CONFIG_BLK_DEV_IDEPCI */
 		}
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
+		goto out;
 	}
+
 	drive = hwgroup->drive;
 	if (!drive) {
 		/*
@@ -1417,10 +1228,10 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 *
 		 * [Note - this can occur if the drive is hot unplugged]
 		 */
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_HANDLED;
+		goto out_handled;
 	}
-	if (!drive_is_ready(drive)) {
+
+	if (!drive_is_ready(drive))
 		/*
 		 * This happens regularly when we share a PCI IRQ with
 		 * another device. Unfortunately, it can also happen
@@ -1428,9 +1239,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 * their status register is up to date. Hopefully we have
 		 * enough advance overhead that the latter isn't a problem.
 		 */
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
-	}
+		goto out;
+
 	if (!hwgroup->busy) {
 		hwgroup->busy = 1;	/* paranoia */
 		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
@@ -1438,7 +1248,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	hwgroup->handler = NULL;
 	hwgroup->req_gen++;
 	del_timer(&hwgroup->timer);
-	spin_unlock(&ide_lock);
+	spin_unlock(&hwgroup->lock);
 
 	if (hwif->port_ops && hwif->port_ops->clear_irq)
 		hwif->port_ops->clear_irq(drive);
@@ -1449,7 +1259,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	/* service this interrupt, may set handler for next interrupt */
 	startstop = handler(drive);
 
-	spin_lock_irq(&ide_lock);
+	spin_lock_irq(&hwgroup->lock);
 	/*
 	 * Note that handler() may have set things up for another
 	 * interrupt to occur soon, but it cannot happen until
@@ -1467,8 +1277,11 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 				"on exit\n", drive->name);
 		}
 	}
-	spin_unlock_irqrestore(&ide_lock, flags);
-	return IRQ_HANDLED;
+out_handled:
+	irq_ret = IRQ_HANDLED;
+out:
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
+	return irq_ret;
 }
 
 /**
@@ -1488,16 +1301,17 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 
 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request_queue *q = drive->queue;
 	unsigned long flags;
-	ide_hwgroup_t *hwgroup = HWGROUP(drive);
 
-	spin_lock_irqsave(&ide_lock, flags);
 	hwgroup->rq = NULL;
-	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-	blk_start_queueing(drive->queue);
-	spin_unlock_irqrestore(&ide_lock, flags);
-}
 
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	blk_start_queueing(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
 EXPORT_SYMBOL(ide_do_drive_cmd);
 
 void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
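
The ide-io.c (and, below, ide-iops.c) conversions drop the single global ide_lock in favour of the spinlock embedded in each hwgroup, so request handling on one hwgroup no longer serializes against every other IDE port in the system; only hwgroup->rq, hwgroup->busy and the handler/timer state still need the lock. A kernel-style fragment of the pattern every converted call site now follows (a sketch, assuming the lock field that this series adds to ide_hwgroup_t):

	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
	unsigned long flags;

	spin_lock_irqsave(&hwgroup->lock, flags);
	/* ... touch hwgroup->rq / hwgroup->busy / hwgroup->handler ... */
	spin_unlock_irqrestore(&hwgroup->lock, flags);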
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index fcde16bb53a..28232c64c34 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -19,7 +19,6 @@ int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
 		      const struct ide_ioctl_devset *s)
 {
 	const struct ide_devset *ds;
-	unsigned long flags;
 	int err = -EOPNOTSUPP;
 
 	for (; (ds = s->setting); s++) {
@@ -33,9 +32,7 @@ int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
 
 read_val:
 	mutex_lock(&ide_setting_mtx);
-	spin_lock_irqsave(&ide_lock, flags);
 	err = ds->get(drive);
-	spin_unlock_irqrestore(&ide_lock, flags);
 	mutex_unlock(&ide_setting_mtx);
 	return err >= 0 ? put_user(err, (long __user *)arg) : err;
 
@@ -98,7 +95,7 @@ static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
 		return -EPERM;
 
 	if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) &&
-	    (drive->media == ide_disk || drive->media == ide_floppy ||
+	    (drive->media != ide_tape ||
 	     (drive->dev_flags & IDE_DFLAG_SCSI)))
 		return -EPERM;
 
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c41c3b9b6f0..ad8bd653928 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -835,10 +835,12 @@ static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
835void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, 835void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
836 unsigned int timeout, ide_expiry_t *expiry) 836 unsigned int timeout, ide_expiry_t *expiry)
837{ 837{
838 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
838 unsigned long flags; 839 unsigned long flags;
839 spin_lock_irqsave(&ide_lock, flags); 840
841 spin_lock_irqsave(&hwgroup->lock, flags);
840 __ide_set_handler(drive, handler, timeout, expiry); 842 __ide_set_handler(drive, handler, timeout, expiry);
841 spin_unlock_irqrestore(&ide_lock, flags); 843 spin_unlock_irqrestore(&hwgroup->lock, flags);
842} 844}
843 845
844EXPORT_SYMBOL(ide_set_handler); 846EXPORT_SYMBOL(ide_set_handler);
@@ -860,10 +862,11 @@ EXPORT_SYMBOL(ide_set_handler);
860void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler, 862void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
861 unsigned timeout, ide_expiry_t *expiry) 863 unsigned timeout, ide_expiry_t *expiry)
862{ 864{
865 ide_hwif_t *hwif = drive->hwif;
866 ide_hwgroup_t *hwgroup = hwif->hwgroup;
863 unsigned long flags; 867 unsigned long flags;
864 ide_hwif_t *hwif = HWIF(drive);
865 868
866 spin_lock_irqsave(&ide_lock, flags); 869 spin_lock_irqsave(&hwgroup->lock, flags);
867 __ide_set_handler(drive, handler, timeout, expiry); 870 __ide_set_handler(drive, handler, timeout, expiry);
868 hwif->tp_ops->exec_command(hwif, cmd); 871 hwif->tp_ops->exec_command(hwif, cmd);
869 /* 872 /*
@@ -873,19 +876,20 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
873 * FIXME: we could skip this delay with care on non shared devices 876 * FIXME: we could skip this delay with care on non shared devices
874 */ 877 */
875 ndelay(400); 878 ndelay(400);
876 spin_unlock_irqrestore(&ide_lock, flags); 879 spin_unlock_irqrestore(&hwgroup->lock, flags);
877} 880}
878EXPORT_SYMBOL(ide_execute_command); 881EXPORT_SYMBOL(ide_execute_command);
879 882
880void ide_execute_pkt_cmd(ide_drive_t *drive) 883void ide_execute_pkt_cmd(ide_drive_t *drive)
881{ 884{
882 ide_hwif_t *hwif = drive->hwif; 885 ide_hwif_t *hwif = drive->hwif;
886 ide_hwgroup_t *hwgroup = hwif->hwgroup;
883 unsigned long flags; 887 unsigned long flags;
884 888
885 spin_lock_irqsave(&ide_lock, flags); 889 spin_lock_irqsave(&hwgroup->lock, flags);
886 hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET); 890 hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
887 ndelay(400); 891 ndelay(400);
888 spin_unlock_irqrestore(&ide_lock, flags); 892 spin_unlock_irqrestore(&hwgroup->lock, flags);
889} 893}
890EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd); 894EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
891 895
@@ -1076,22 +1080,16 @@ static void pre_reset(ide_drive_t *drive)
1076 */ 1080 */
1077static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) 1081static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1078{ 1082{
1079 unsigned int unit; 1083 ide_hwif_t *hwif = drive->hwif;
1080 unsigned long flags, timeout; 1084 ide_hwgroup_t *hwgroup = hwif->hwgroup;
1081 ide_hwif_t *hwif; 1085 struct ide_io_ports *io_ports = &hwif->io_ports;
1082 ide_hwgroup_t *hwgroup; 1086 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
1083 struct ide_io_ports *io_ports;
1084 const struct ide_tp_ops *tp_ops;
1085 const struct ide_port_ops *port_ops; 1087 const struct ide_port_ops *port_ops;
1088 unsigned long flags, timeout;
1089 unsigned int unit;
1086 DEFINE_WAIT(wait); 1090 DEFINE_WAIT(wait);
1087 1091
1088 spin_lock_irqsave(&ide_lock, flags); 1092 spin_lock_irqsave(&hwgroup->lock, flags);
1089 hwif = HWIF(drive);
1090 hwgroup = HWGROUP(drive);
1091
1092 io_ports = &hwif->io_ports;
1093
1094 tp_ops = hwif->tp_ops;
1095 1093
1096 /* We must not reset with running handlers */ 1094 /* We must not reset with running handlers */
1097 BUG_ON(hwgroup->handler != NULL); 1095 BUG_ON(hwgroup->handler != NULL);
@@ -1106,7 +1104,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1106 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1104 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1107 hwgroup->polling = 1; 1105 hwgroup->polling = 1;
1108 __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL); 1106 __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
1109 spin_unlock_irqrestore(&ide_lock, flags); 1107 spin_unlock_irqrestore(&hwgroup->lock, flags);
1110 return ide_started; 1108 return ide_started;
1111 } 1109 }
1112 1110
@@ -1129,9 +1127,9 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1129 if (time_before_eq(timeout, now)) 1127 if (time_before_eq(timeout, now))
1130 break; 1128 break;
1131 1129
1132 spin_unlock_irqrestore(&ide_lock, flags); 1130 spin_unlock_irqrestore(&hwgroup->lock, flags);
1133 timeout = schedule_timeout_uninterruptible(timeout - now); 1131 timeout = schedule_timeout_uninterruptible(timeout - now);
1134 spin_lock_irqsave(&ide_lock, flags); 1132 spin_lock_irqsave(&hwgroup->lock, flags);
1135 } while (timeout); 1133 } while (timeout);
1136 finish_wait(&ide_park_wq, &wait); 1134 finish_wait(&ide_park_wq, &wait);
1137 1135
@@ -1143,7 +1141,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1143 pre_reset(&hwif->drives[unit]); 1141 pre_reset(&hwif->drives[unit]);
1144 1142
1145 if (io_ports->ctl_addr == 0) { 1143 if (io_ports->ctl_addr == 0) {
1146 spin_unlock_irqrestore(&ide_lock, flags); 1144 spin_unlock_irqrestore(&hwgroup->lock, flags);
1147 ide_complete_drive_reset(drive, -ENXIO); 1145 ide_complete_drive_reset(drive, -ENXIO);
1148 return ide_stopped; 1146 return ide_stopped;
1149 } 1147 }
@@ -1179,7 +1177,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1179 if (port_ops && port_ops->resetproc) 1177 if (port_ops && port_ops->resetproc)
1180 port_ops->resetproc(drive); 1178 port_ops->resetproc(drive);
1181 1179
1182 spin_unlock_irqrestore(&ide_lock, flags); 1180 spin_unlock_irqrestore(&hwgroup->lock, flags);
1183 return ide_started; 1181 return ide_started;
1184} 1182}
1185 1183
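The hunks above all follow one pattern: code that used to serialize on the global ide_lock now takes the lock embedded in the drive's hwgroup. A minimal sketch of that pattern, assuming ide_hwgroup_t carries a spinlock_t member named lock (the spin_lock_init(&hwgroup->lock) added in ide-probe.c further down implies as much); the function name here is illustrative only, not part of the patch:

    #include <linux/ide.h>
    #include <linux/spinlock.h>

    /* Illustrative sketch: guard hwgroup state with the per-hwgroup lock
     * rather than the old global ide_lock. */
    static void example_touch_hwgroup(ide_drive_t *drive)
    {
            ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
            unsigned long flags;

            spin_lock_irqsave(&hwgroup->lock, flags);
            /* inspect or modify hwgroup->handler, hwgroup->rq, timers, ... */
            spin_unlock_irqrestore(&hwgroup->lock, flags);
    }

One consequence worth keeping in mind: a driver can no longer rely on a single global lock to order accesses across unrelated ports, so anything shared between hwgroups needs its own protection (the umc8672 hunk below, which takes the mate port's hwgroup lock, is an example of exactly that).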
diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c
new file mode 100644
index 00000000000..8c5dcbf2254
--- /dev/null
+++ b/drivers/ide/ide-legacy.c
@@ -0,0 +1,58 @@
1#include <linux/kernel.h>
2#include <linux/ide.h>
3
4static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
5 u8 port_no, const struct ide_port_info *d,
6 unsigned long config)
7{
8 unsigned long base, ctl;
9 int irq;
10
11 if (port_no == 0) {
12 base = 0x1f0;
13 ctl = 0x3f6;
14 irq = 14;
15 } else {
16 base = 0x170;
17 ctl = 0x376;
18 irq = 15;
19 }
20
21 if (!request_region(base, 8, d->name)) {
22 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
23 d->name, base, base + 7);
24 return;
25 }
26
27 if (!request_region(ctl, 1, d->name)) {
28 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
29 d->name, ctl);
30 release_region(base, 8);
31 return;
32 }
33
34 ide_std_init_ports(hw, base, ctl);
35 hw->irq = irq;
36 hw->chipset = d->chipset;
37 hw->config = config;
38
39 hws[port_no] = hw;
40}
41
42int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
43{
44 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
45
46 memset(&hw, 0, sizeof(hw));
47
48 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
49 ide_legacy_init_one(hws, &hw[0], 0, d, config);
50 ide_legacy_init_one(hws, &hw[1], 1, d, config);
51
52 if (hws[0] == NULL && hws[1] == NULL &&
53 (d->host_flags & IDE_HFLAG_SINGLE))
54 return -ENOENT;
55
56 return ide_host_add(d, hws, NULL);
57}
58EXPORT_SYMBOL_GPL(ide_legacy_device_add);
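For context, a hedged sketch of how a legacy host driver is expected to consume this helper; the driver name and the contents of its ide_port_info are hypothetical, only the ide_legacy_device_add() call itself mirrors the interface defined above:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/ide.h>

    /* Hypothetical ISA/VLB-style host driver, for illustration only. */
    static const struct ide_port_info fakelegacy_port_info = {
            .name           = "fakelegacy",
            .host_flags     = IDE_HFLAG_NO_DMA,
    };

    static int __init fakelegacy_init(void)
    {
            /* the second argument ends up in hw->config; 0 when unused */
            return ide_legacy_device_add(&fakelegacy_port_info, 0);
    }
    module_init(fakelegacy_init);

The helper keeps the old 0x1f0/0x170 port and IRQ assumptions in one place, so each converted driver shrinks to its own timing code plus a call like the one above.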
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 9fc4cfb2a27..9f6e33d8a8b 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -43,7 +43,6 @@ const char *ide_xfer_verbose(u8 mode)
43 43
44 return s; 44 return s;
45} 45}
46
47EXPORT_SYMBOL(ide_xfer_verbose); 46EXPORT_SYMBOL(ide_xfer_verbose);
48 47
49/** 48/**
@@ -87,7 +86,7 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
87 * This is used by most chipset support modules when "auto-tuning". 86 * This is used by most chipset support modules when "auto-tuning".
88 */ 87 */
89 88
90u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode) 89u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
91{ 90{
92 u16 *id = drive->id; 91 u16 *id = drive->id;
93 int pio_mode = -1, overridden = 0; 92 int pio_mode = -1, overridden = 0;
@@ -131,7 +130,6 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
131 130
132 return pio_mode; 131 return pio_mode;
133} 132}
134
135EXPORT_SYMBOL_GPL(ide_get_best_pio_mode); 133EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
136 134
137/* req_pio == "255" for auto-tune */ 135/* req_pio == "255" for auto-tune */
@@ -162,7 +160,6 @@ void ide_set_pio(ide_drive_t *drive, u8 req_pio)
162 160
163 (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio); 161 (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
164} 162}
165
166EXPORT_SYMBOL_GPL(ide_set_pio); 163EXPORT_SYMBOL_GPL(ide_set_pio);
167 164
168/** 165/**
@@ -173,7 +170,7 @@ EXPORT_SYMBOL_GPL(ide_set_pio);
173 * Enable or disable bounce buffering for the device. Drives move 170 * Enable or disable bounce buffering for the device. Drives move
174 * between PIO and DMA and that changes the rules we need. 171 * between PIO and DMA and that changes the rules we need.
175 */ 172 */
176 173
177void ide_toggle_bounce(ide_drive_t *drive, int on) 174void ide_toggle_bounce(ide_drive_t *drive, int on)
178{ 175{
179 u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */ 176 u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
@@ -243,14 +240,13 @@ int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
243 return ide_config_drive_speed(drive, mode); 240 return ide_config_drive_speed(drive, mode);
244 } 241 }
245} 242}
246
247EXPORT_SYMBOL_GPL(ide_set_dma_mode); 243EXPORT_SYMBOL_GPL(ide_set_dma_mode);
248 244
249/** 245/**
250 * ide_set_xfer_rate - set transfer rate 246 * ide_set_xfer_rate - set transfer rate
251 * @drive: drive to set 247 * @drive: drive to set
252 * @rate: speed to attempt to set 248 * @rate: speed to attempt to set
253 * 249 *
254 * General helper for setting the speed of an IDE device. This 250 * General helper for setting the speed of an IDE device. This
255 * function knows about user enforced limits from the configuration 251 * function knows about user enforced limits from the configuration
256 * which ->set_pio_mode/->set_dma_mode does not. 252 * which ->set_pio_mode/->set_dma_mode does not.
@@ -277,21 +273,16 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
277 273
278static void ide_dump_opcode(ide_drive_t *drive) 274static void ide_dump_opcode(ide_drive_t *drive)
279{ 275{
280 struct request *rq; 276 struct request *rq = drive->hwif->hwgroup->rq;
281 ide_task_t *task = NULL; 277 ide_task_t *task = NULL;
282 278
283 spin_lock(&ide_lock);
284 rq = NULL;
285 if (HWGROUP(drive))
286 rq = HWGROUP(drive)->rq;
287 spin_unlock(&ide_lock);
288 if (!rq) 279 if (!rq)
289 return; 280 return;
290 281
291 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 282 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
292 task = rq->special; 283 task = rq->special;
293 284
294 printk("ide: failed opcode was: "); 285 printk(KERN_ERR "ide: failed opcode was: ");
295 if (task == NULL) 286 if (task == NULL)
296 printk(KERN_CONT "unknown\n"); 287 printk(KERN_CONT "unknown\n");
297 else 288 else
@@ -329,44 +320,55 @@ static void ide_dump_sector(ide_drive_t *drive)
329 drive->hwif->tp_ops->tf_read(drive, &task); 320 drive->hwif->tp_ops->tf_read(drive, &task);
330 321
331 if (lba48 || (tf->device & ATA_LBA)) 322 if (lba48 || (tf->device & ATA_LBA))
332 printk(", LBAsect=%llu", 323 printk(KERN_CONT ", LBAsect=%llu",
333 (unsigned long long)ide_get_lba_addr(tf, lba48)); 324 (unsigned long long)ide_get_lba_addr(tf, lba48));
334 else 325 else
335 printk(", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, 326 printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
336 tf->device & 0xf, tf->lbal); 327 tf->device & 0xf, tf->lbal);
337} 328}
338 329
339static void ide_dump_ata_error(ide_drive_t *drive, u8 err) 330static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
340{ 331{
341 printk("{ "); 332 printk(KERN_ERR "{ ");
342 if (err & ATA_ABORTED) printk("DriveStatusError "); 333 if (err & ATA_ABORTED)
334 printk(KERN_CONT "DriveStatusError ");
343 if (err & ATA_ICRC) 335 if (err & ATA_ICRC)
344 printk((err & ATA_ABORTED) ? "BadCRC " : "BadSector "); 336 printk(KERN_CONT "%s",
345 if (err & ATA_UNC) printk("UncorrectableError "); 337 (err & ATA_ABORTED) ? "BadCRC " : "BadSector ");
346 if (err & ATA_IDNF) printk("SectorIdNotFound "); 338 if (err & ATA_UNC)
347 if (err & ATA_TRK0NF) printk("TrackZeroNotFound "); 339 printk(KERN_CONT "UncorrectableError ");
348 if (err & ATA_AMNF) printk("AddrMarkNotFound "); 340 if (err & ATA_IDNF)
349 printk("}"); 341 printk(KERN_CONT "SectorIdNotFound ");
342 if (err & ATA_TRK0NF)
343 printk(KERN_CONT "TrackZeroNotFound ");
344 if (err & ATA_AMNF)
345 printk(KERN_CONT "AddrMarkNotFound ");
346 printk(KERN_CONT "}");
350 if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK || 347 if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK ||
351 (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) { 348 (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) {
352 ide_dump_sector(drive); 349 ide_dump_sector(drive);
353 if (HWGROUP(drive) && HWGROUP(drive)->rq) 350 if (HWGROUP(drive) && HWGROUP(drive)->rq)
354 printk(", sector=%llu", 351 printk(KERN_CONT ", sector=%llu",
355 (unsigned long long)HWGROUP(drive)->rq->sector); 352 (unsigned long long)HWGROUP(drive)->rq->sector);
356 } 353 }
357 printk("\n"); 354 printk(KERN_CONT "\n");
358} 355}
359 356
360static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) 357static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
361{ 358{
362 printk("{ "); 359 printk(KERN_ERR "{ ");
363 if (err & ATAPI_ILI) printk("IllegalLengthIndication "); 360 if (err & ATAPI_ILI)
364 if (err & ATAPI_EOM) printk("EndOfMedia "); 361 printk(KERN_CONT "IllegalLengthIndication ");
365 if (err & ATA_ABORTED) printk("AbortedCommand "); 362 if (err & ATAPI_EOM)
366 if (err & ATA_MCR) printk("MediaChangeRequested "); 363 printk(KERN_CONT "EndOfMedia ");
367 if (err & ATAPI_LFS) printk("LastFailedSense=0x%02x ", 364 if (err & ATA_ABORTED)
368 (err & ATAPI_LFS) >> 4); 365 printk(KERN_CONT "AbortedCommand ");
369 printk("}\n"); 366 if (err & ATA_MCR)
367 printk(KERN_CONT "MediaChangeRequested ");
368 if (err & ATAPI_LFS)
369 printk(KERN_CONT "LastFailedSense=0x%02x ",
370 (err & ATAPI_LFS) >> 4);
371 printk(KERN_CONT "}\n");
370} 372}
371 373
372/** 374/**
@@ -382,34 +384,37 @@ static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
382 384
383u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat) 385u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
384{ 386{
385 unsigned long flags;
386 u8 err = 0; 387 u8 err = 0;
387 388
388 local_irq_save(flags); 389 printk(KERN_ERR "%s: %s: status=0x%02x { ", drive->name, msg, stat);
389 printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
390 if (stat & ATA_BUSY) 390 if (stat & ATA_BUSY)
391 printk("Busy "); 391 printk(KERN_CONT "Busy ");
392 else { 392 else {
393 if (stat & ATA_DRDY) printk("DriveReady "); 393 if (stat & ATA_DRDY)
394 if (stat & ATA_DF) printk("DeviceFault "); 394 printk(KERN_CONT "DriveReady ");
395 if (stat & ATA_DSC) printk("SeekComplete "); 395 if (stat & ATA_DF)
396 if (stat & ATA_DRQ) printk("DataRequest "); 396 printk(KERN_CONT "DeviceFault ");
397 if (stat & ATA_CORR) printk("CorrectedError "); 397 if (stat & ATA_DSC)
398 if (stat & ATA_IDX) printk("Index "); 398 printk(KERN_CONT "SeekComplete ");
399 if (stat & ATA_ERR) printk("Error "); 399 if (stat & ATA_DRQ)
400 printk(KERN_CONT "DataRequest ");
401 if (stat & ATA_CORR)
402 printk(KERN_CONT "CorrectedError ");
403 if (stat & ATA_IDX)
404 printk(KERN_CONT "Index ");
405 if (stat & ATA_ERR)
406 printk(KERN_CONT "Error ");
400 } 407 }
401 printk("}\n"); 408 printk(KERN_CONT "}\n");
402 if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) { 409 if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) {
403 err = ide_read_error(drive); 410 err = ide_read_error(drive);
404 printk("%s: %s: error=0x%02x ", drive->name, msg, err); 411 printk(KERN_ERR "%s: %s: error=0x%02x ", drive->name, msg, err);
405 if (drive->media == ide_disk) 412 if (drive->media == ide_disk)
406 ide_dump_ata_error(drive, err); 413 ide_dump_ata_error(drive, err);
407 else 414 else
408 ide_dump_atapi_error(drive, err); 415 ide_dump_atapi_error(drive, err);
409 } 416 }
410 ide_dump_opcode(drive); 417 ide_dump_opcode(drive);
411 local_irq_restore(flags);
412 return err; 418 return err;
413} 419}
414
415EXPORT_SYMBOL(ide_dump_status); 420EXPORT_SYMBOL(ide_dump_status);
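The ide-lib.c changes above are largely mechanical: every continuation of a partially printed line now carries KERN_CONT, so only the first fragment sets the log level and later fragments are not treated as new messages; ide_dump_status() also no longer disables local interrupts just to print. A minimal sketch of the printk pattern (the status bits and message text here are made up):

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void example_dump_bits(u8 stat)
    {
            printk(KERN_ERR "example: status=0x%02x { ", stat);
            if (stat & 0x01)
                    printk(KERN_CONT "BitZero ");
            if (stat & 0x02)
                    printk(KERN_CONT "BitOne ");
            printk(KERN_CONT "}\n");
    }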
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 03b00e57e93..63d01c55f86 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -7,17 +7,16 @@ DECLARE_WAIT_QUEUE_HEAD(ide_park_wq);
7 7
8static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) 8static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
9{ 9{
10 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
10 struct request_queue *q = drive->queue; 11 struct request_queue *q = drive->queue;
11 struct request *rq; 12 struct request *rq;
12 int rc; 13 int rc;
13 14
14 timeout += jiffies; 15 timeout += jiffies;
15 spin_lock_irq(&ide_lock); 16 spin_lock_irq(&hwgroup->lock);
16 if (drive->dev_flags & IDE_DFLAG_PARKED) { 17 if (drive->dev_flags & IDE_DFLAG_PARKED) {
17 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup; 18 int reset_timer = time_before(timeout, drive->sleep);
18 int reset_timer;
19 19
20 reset_timer = time_before(timeout, drive->sleep);
21 drive->sleep = timeout; 20 drive->sleep = timeout;
22 wake_up_all(&ide_park_wq); 21 wake_up_all(&ide_park_wq);
23 if (reset_timer && hwgroup->sleeping && 22 if (reset_timer && hwgroup->sleeping &&
@@ -26,10 +25,10 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
26 hwgroup->busy = 0; 25 hwgroup->busy = 0;
27 blk_start_queueing(q); 26 blk_start_queueing(q);
28 } 27 }
29 spin_unlock_irq(&ide_lock); 28 spin_unlock_irq(&hwgroup->lock);
30 return; 29 return;
31 } 30 }
32 spin_unlock_irq(&ide_lock); 31 spin_unlock_irq(&hwgroup->lock);
33 32
34 rq = blk_get_request(q, READ, __GFP_WAIT); 33 rq = blk_get_request(q, READ, __GFP_WAIT);
35 rq->cmd[0] = REQ_PARK_HEADS; 34 rq->cmd[0] = REQ_PARK_HEADS;
@@ -62,20 +61,21 @@ ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
62 char *buf) 61 char *buf)
63{ 62{
64 ide_drive_t *drive = to_ide_device(dev); 63 ide_drive_t *drive = to_ide_device(dev);
64 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
65 unsigned long now; 65 unsigned long now;
66 unsigned int msecs; 66 unsigned int msecs;
67 67
68 if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD) 68 if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
69 return -EOPNOTSUPP; 69 return -EOPNOTSUPP;
70 70
71 spin_lock_irq(&ide_lock); 71 spin_lock_irq(&hwgroup->lock);
72 now = jiffies; 72 now = jiffies;
73 if (drive->dev_flags & IDE_DFLAG_PARKED && 73 if (drive->dev_flags & IDE_DFLAG_PARKED &&
74 time_after(drive->sleep, now)) 74 time_after(drive->sleep, now))
75 msecs = jiffies_to_msecs(drive->sleep - now); 75 msecs = jiffies_to_msecs(drive->sleep - now);
76 else 76 else
77 msecs = 0; 77 msecs = 0;
78 spin_unlock_irq(&ide_lock); 78 spin_unlock_irq(&hwgroup->lock);
79 79
80 return snprintf(buf, 20, "%u\n", msecs); 80 return snprintf(buf, 20, "%u\n", msecs);
81} 81}
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
new file mode 100644
index 00000000000..8282c6086e6
--- /dev/null
+++ b/drivers/ide/ide-pm.c
@@ -0,0 +1,235 @@
1#include <linux/kernel.h>
2#include <linux/ide.h>
3#include <linux/hdreg.h>
4
5int generic_ide_suspend(struct device *dev, pm_message_t mesg)
6{
7 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
8 ide_hwif_t *hwif = HWIF(drive);
9 struct request *rq;
10 struct request_pm_state rqpm;
11 ide_task_t args;
12 int ret;
13
14 /* call ACPI _GTM only once */
15 if ((drive->dn & 1) == 0 || pair == NULL)
16 ide_acpi_get_timing(hwif);
17
18 memset(&rqpm, 0, sizeof(rqpm));
19 memset(&args, 0, sizeof(args));
20 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
21 rq->cmd_type = REQ_TYPE_PM_SUSPEND;
22 rq->special = &args;
23 rq->data = &rqpm;
24 rqpm.pm_step = IDE_PM_START_SUSPEND;
25 if (mesg.event == PM_EVENT_PRETHAW)
26 mesg.event = PM_EVENT_FREEZE;
27 rqpm.pm_state = mesg.event;
28
29 ret = blk_execute_rq(drive->queue, NULL, rq, 0);
30 blk_put_request(rq);
31
32 /* call ACPI _PS3 only after both devices are suspended */
33 if (ret == 0 && ((drive->dn & 1) || pair == NULL))
34 ide_acpi_set_state(hwif, 0);
35
36 return ret;
37}
38
39int generic_ide_resume(struct device *dev)
40{
41 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
42 ide_hwif_t *hwif = HWIF(drive);
43 struct request *rq;
44 struct request_pm_state rqpm;
45 ide_task_t args;
46 int err;
47
48 /* call ACPI _PS0 / _STM only once */
49 if ((drive->dn & 1) == 0 || pair == NULL) {
50 ide_acpi_set_state(hwif, 1);
51 ide_acpi_push_timing(hwif);
52 }
53
54 ide_acpi_exec_tfs(drive);
55
56 memset(&rqpm, 0, sizeof(rqpm));
57 memset(&args, 0, sizeof(args));
58 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
59 rq->cmd_type = REQ_TYPE_PM_RESUME;
60 rq->cmd_flags |= REQ_PREEMPT;
61 rq->special = &args;
62 rq->data = &rqpm;
63 rqpm.pm_step = IDE_PM_START_RESUME;
64 rqpm.pm_state = PM_EVENT_ON;
65
66 err = blk_execute_rq(drive->queue, NULL, rq, 1);
67 blk_put_request(rq);
68
69 if (err == 0 && dev->driver) {
70 ide_driver_t *drv = to_ide_driver(dev->driver);
71
72 if (drv->resume)
73 drv->resume(drive);
74 }
75
76 return err;
77}
78
79void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
80{
81 struct request_pm_state *pm = rq->data;
82
83#ifdef DEBUG_PM
84 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
85 drive->name, pm->pm_step);
86#endif
87 if (drive->media != ide_disk)
88 return;
89
90 switch (pm->pm_step) {
91 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
92 if (pm->pm_state == PM_EVENT_FREEZE)
93 pm->pm_step = IDE_PM_COMPLETED;
94 else
95 pm->pm_step = IDE_PM_STANDBY;
96 break;
97 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
98 pm->pm_step = IDE_PM_COMPLETED;
99 break;
100 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
101 pm->pm_step = IDE_PM_IDLE;
102 break;
103 case IDE_PM_IDLE: /* Resume step 2 (idle)*/
104 pm->pm_step = IDE_PM_RESTORE_DMA;
105 break;
106 }
107}
108
109ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
110{
111 struct request_pm_state *pm = rq->data;
112 ide_task_t *args = rq->special;
113
114 memset(args, 0, sizeof(*args));
115
116 switch (pm->pm_step) {
117 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
118 if (drive->media != ide_disk)
119 break;
120 /* Not supported? Switch to next step now. */
121 if (ata_id_flush_enabled(drive->id) == 0 ||
122 (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
123 ide_complete_power_step(drive, rq);
124 return ide_stopped;
125 }
126 if (ata_id_flush_ext_enabled(drive->id))
127 args->tf.command = ATA_CMD_FLUSH_EXT;
128 else
129 args->tf.command = ATA_CMD_FLUSH;
130 goto out_do_tf;
131 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
132 args->tf.command = ATA_CMD_STANDBYNOW1;
133 goto out_do_tf;
134 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
135 ide_set_max_pio(drive);
136 /*
137 * skip IDE_PM_IDLE for ATAPI devices
138 */
139 if (drive->media != ide_disk)
140 pm->pm_step = IDE_PM_RESTORE_DMA;
141 else
142 ide_complete_power_step(drive, rq);
143 return ide_stopped;
144 case IDE_PM_IDLE: /* Resume step 2 (idle) */
145 args->tf.command = ATA_CMD_IDLEIMMEDIATE;
146 goto out_do_tf;
147 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
148 /*
149 * Right now, all we do is call ide_set_dma(drive),
150 * we could be smarter and check for current xfer_speed
151 * in struct drive etc...
152 */
153 if (drive->hwif->dma_ops == NULL)
154 break;
155 /*
156 * TODO: respect IDE_DFLAG_USING_DMA
157 */
158 ide_set_dma(drive);
159 break;
160 }
161
162 pm->pm_step = IDE_PM_COMPLETED;
163 return ide_stopped;
164
165out_do_tf:
166 args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
167 args->data_phase = TASKFILE_NO_DATA;
168 return do_rw_taskfile(drive, args);
169}
170
171/**
172 * ide_complete_pm_request - end the current Power Management request
173 * @drive: target drive
174 * @rq: request
175 *
176 * This function cleans up the current PM request and stops the queue
177 * if necessary.
178 */
179void ide_complete_pm_request(ide_drive_t *drive, struct request *rq)
180{
181 struct request_queue *q = drive->queue;
182 unsigned long flags;
183
184#ifdef DEBUG_PM
185 printk("%s: completing PM request, %s\n", drive->name,
186 blk_pm_suspend_request(rq) ? "suspend" : "resume");
187#endif
188 spin_lock_irqsave(q->queue_lock, flags);
189 if (blk_pm_suspend_request(rq)) {
190 blk_stop_queue(q);
191 } else {
192 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
193 blk_start_queue(q);
194 }
195 spin_unlock_irqrestore(q->queue_lock, flags);
196
197 drive->hwif->hwgroup->rq = NULL;
198
199 if (blk_end_request(rq, 0, 0))
200 BUG();
201}
202
203void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
204{
205 struct request_pm_state *pm = rq->data;
206
207 if (blk_pm_suspend_request(rq) &&
208 pm->pm_step == IDE_PM_START_SUSPEND)
209 /* Mark drive blocked when starting the suspend sequence. */
210 drive->dev_flags |= IDE_DFLAG_BLOCKED;
211 else if (blk_pm_resume_request(rq) &&
212 pm->pm_step == IDE_PM_START_RESUME) {
213 /*
214 * The first thing we do on wakeup is to wait for BSY bit to
215 * go away (with a looong timeout) as a drive on this hwif may
216 * just be POSTing itself.
217 * We do that before even selecting as the "other" device on
218 * the bus may be broken enough to walk on our toes at this
219 * point.
220 */
221 ide_hwif_t *hwif = drive->hwif;
222 int rc;
223#ifdef DEBUG_PM
224 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
225#endif
226 rc = ide_wait_not_busy(hwif, 35000);
227 if (rc)
228 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
229 SELECT_DRIVE(drive);
230 hwif->tp_ops->set_irq(hwif, 1);
231 rc = ide_wait_not_busy(hwif, 100000);
232 if (rc)
233 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
234 }
235}
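ide_complete_power_step() and ide_start_power_step() above implement a small state machine: suspend walks IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED (skipping the standby command for a freeze event), while resume walks IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA -> IDE_PM_COMPLETED for disks. A standalone userspace model of that progression, purely illustrative and not kernel code:

    #include <stdio.h>

    /* Userspace model only -- mirrors the step ordering, not the kernel API. */
    enum pm_step { FLUSH_CACHE, STANDBY, RESTORE_PIO, IDLE, RESTORE_DMA, COMPLETED };

    static enum pm_step next_step(enum pm_step step, int freeze)
    {
            switch (step) {
            case FLUSH_CACHE:
                    return freeze ? COMPLETED : STANDBY;
            case STANDBY:
                    return COMPLETED;
            case RESTORE_PIO:
                    return IDLE;
            case IDLE:
                    return RESTORE_DMA;
            default:
                    return COMPLETED;
            }
    }

    int main(void)
    {
            static const char *name[] = { "flush-cache", "standby",
                    "restore-pio", "idle", "restore-dma", "completed" };
            enum pm_step s;

            for (s = FLUSH_CACHE; s != COMPLETED; s = next_step(s, 0))
                    printf("suspend: %s\n", name[s]);
            for (s = RESTORE_PIO; s != COMPLETED; s = next_step(s, 0))
                    printf("resume:  %s\n", name[s]);
            return 0;
    }

Running it prints the suspend steps followed by the resume steps in the order the kernel code walks them.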
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index c55bdbd2231..a64ec259f3d 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -110,20 +110,22 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
110 * read and parse the results. This function is run with 110 * read and parse the results. This function is run with
111 * interrupts disabled. 111 * interrupts disabled.
112 */ 112 */
113 113
114static inline void do_identify (ide_drive_t *drive, u8 cmd) 114static void do_identify(ide_drive_t *drive, u8 cmd)
115{ 115{
116 ide_hwif_t *hwif = HWIF(drive); 116 ide_hwif_t *hwif = HWIF(drive);
117 u16 *id = drive->id; 117 u16 *id = drive->id;
118 char *m = (char *)&id[ATA_ID_PROD]; 118 char *m = (char *)&id[ATA_ID_PROD];
119 unsigned long flags;
119 int bswap = 1, is_cfa; 120 int bswap = 1, is_cfa;
120 121
122 /* local CPU only; some systems need this */
123 local_irq_save(flags);
121 /* read 512 bytes of id info */ 124 /* read 512 bytes of id info */
122 hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); 125 hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
126 local_irq_restore(flags);
123 127
124 drive->dev_flags |= IDE_DFLAG_ID_READ; 128 drive->dev_flags |= IDE_DFLAG_ID_READ;
125
126 local_irq_enable();
127#ifdef DEBUG 129#ifdef DEBUG
128 printk(KERN_INFO "%s: dumping identify data\n", drive->name); 130 printk(KERN_INFO "%s: dumping identify data\n", drive->name);
129 ide_dump_identify((u8 *)id); 131 ide_dump_identify((u8 *)id);
@@ -306,17 +308,12 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
306 s = tp_ops->read_status(hwif); 308 s = tp_ops->read_status(hwif);
307 309
308 if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) { 310 if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
309 unsigned long flags;
310
311 /* local CPU only; some systems need this */
312 local_irq_save(flags);
313 /* drive returned ID */ 311 /* drive returned ID */
314 do_identify(drive, cmd); 312 do_identify(drive, cmd);
315 /* drive responded with ID */ 313 /* drive responded with ID */
316 rc = 0; 314 rc = 0;
317 /* clear drive IRQ */ 315 /* clear drive IRQ */
318 (void)tp_ops->read_status(hwif); 316 (void)tp_ops->read_status(hwif);
319 local_irq_restore(flags);
320 } else { 317 } else {
321 /* drive refused ID */ 318 /* drive refused ID */
322 rc = 2; 319 rc = 2;
@@ -554,8 +551,8 @@ static void enable_nest (ide_drive_t *drive)
554 * 1 device was found 551 * 1 device was found
555 * (note: IDE_DFLAG_PRESENT might still be not set) 552 * (note: IDE_DFLAG_PRESENT might still be not set)
556 */ 553 */
557 554
558static inline u8 probe_for_drive (ide_drive_t *drive) 555static u8 probe_for_drive(ide_drive_t *drive)
559{ 556{
560 char *m; 557 char *m;
561 558
@@ -642,7 +639,7 @@ static int ide_register_port(ide_hwif_t *hwif)
642 int ret; 639 int ret;
643 640
644 /* register with global device tree */ 641 /* register with global device tree */
645 strlcpy(hwif->gendev.bus_id,hwif->name,BUS_ID_SIZE); 642 dev_set_name(&hwif->gendev, hwif->name);
646 hwif->gendev.driver_data = hwif; 643 hwif->gendev.driver_data = hwif;
647 if (hwif->gendev.parent == NULL) { 644 if (hwif->gendev.parent == NULL) {
648 if (hwif->dev) 645 if (hwif->dev)
@@ -864,31 +861,6 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
864} 861}
865 862
866/* 863/*
867 * save_match() is used to simplify logic in init_irq() below.
868 *
869 * A loophole here is that we may not know about a particular
870 * hwif's irq until after that hwif is actually probed/initialized..
871 * This could be a problem for the case where an hwif is on a
872 * dual interface that requires serialization (eg. cmd640) and another
873 * hwif using one of the same irqs is initialized beforehand.
874 *
875 * This routine detects and reports such situations, but does not fix them.
876 */
877static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
878{
879 ide_hwif_t *m = *match;
880
881 if (m && m->hwgroup && m->hwgroup != new->hwgroup) {
882 if (!new->hwgroup)
883 return;
884 printk(KERN_WARNING "%s: potential IRQ problem with %s and %s\n",
885 hwif->name, new->name, m->name);
886 }
887 if (!m || m->irq != hwif->irq) /* don't undo a prior perfect match */
888 *match = new;
889}
890
891/*
892 * init request queue 864 * init request queue
893 */ 865 */
894static int ide_init_queue(ide_drive_t *drive) 866static int ide_init_queue(ide_drive_t *drive)
@@ -906,7 +878,8 @@ static int ide_init_queue(ide_drive_t *drive)
906 * do not. 878 * do not.
907 */ 879 */
908 880
909 q = blk_init_queue_node(do_ide_request, &ide_lock, hwif_to_node(hwif)); 881 q = blk_init_queue_node(do_ide_request, &hwif->hwgroup->lock,
882 hwif_to_node(hwif));
910 if (!q) 883 if (!q)
911 return 1; 884 return 1;
912 885
@@ -947,7 +920,7 @@ static void ide_add_drive_to_hwgroup(ide_drive_t *drive)
947{ 920{
948 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup; 921 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
949 922
950 spin_lock_irq(&ide_lock); 923 spin_lock_irq(&hwgroup->lock);
951 if (!hwgroup->drive) { 924 if (!hwgroup->drive) {
952 /* first drive for hwgroup. */ 925 /* first drive for hwgroup. */
953 drive->next = drive; 926 drive->next = drive;
@@ -957,7 +930,7 @@ static void ide_add_drive_to_hwgroup(ide_drive_t *drive)
957 drive->next = hwgroup->drive->next; 930 drive->next = hwgroup->drive->next;
958 hwgroup->drive->next = drive; 931 hwgroup->drive->next = drive;
959 } 932 }
960 spin_unlock_irq(&ide_lock); 933 spin_unlock_irq(&hwgroup->lock);
961} 934}
962 935
963/* 936/*
@@ -1002,7 +975,7 @@ void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
1002 975
1003 ide_ports[hwif->index] = NULL; 976 ide_ports[hwif->index] = NULL;
1004 977
1005 spin_lock_irq(&ide_lock); 978 spin_lock_irq(&hwgroup->lock);
1006 /* 979 /*
1007 * Remove us from the hwgroup, and free 980 * Remove us from the hwgroup, and free
1008 * the hwgroup if we were the only member 981 * the hwgroup if we were the only member
@@ -1030,7 +1003,7 @@ void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
1030 } 1003 }
1031 BUG_ON(hwgroup->hwif == hwif); 1004 BUG_ON(hwgroup->hwif == hwif);
1032 } 1005 }
1033 spin_unlock_irq(&ide_lock); 1006 spin_unlock_irq(&hwgroup->lock);
1034} 1007}
1035 1008
1036/* 1009/*
@@ -1051,27 +1024,13 @@ static int init_irq (ide_hwif_t *hwif)
1051 mutex_lock(&ide_cfg_mtx); 1024 mutex_lock(&ide_cfg_mtx);
1052 hwif->hwgroup = NULL; 1025 hwif->hwgroup = NULL;
1053 1026
1054 /*
1055 * Group up with any other hwifs that share our irq(s).
1056 */
1057 for (index = 0; index < MAX_HWIFS; index++) { 1027 for (index = 0; index < MAX_HWIFS; index++) {
1058 ide_hwif_t *h = ide_ports[index]; 1028 ide_hwif_t *h = ide_ports[index];
1059 1029
1060 if (h && h->hwgroup) { /* scan only initialized ports */ 1030 if (h && h->hwgroup) { /* scan only initialized ports */
1061 if (hwif->irq == h->irq) { 1031 if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
1062 hwif->sharing_irq = h->sharing_irq = 1; 1032 if (hwif->host == h->host)
1063 if (hwif->chipset != ide_pci || 1033 match = h;
1064 h->chipset != ide_pci) {
1065 save_match(hwif, h, &match);
1066 }
1067 }
1068 if (hwif->serialized) {
1069 if (hwif->mate && hwif->mate->irq == h->irq)
1070 save_match(hwif, h, &match);
1071 }
1072 if (h->serialized) {
1073 if (h->mate && hwif->irq == h->mate->irq)
1074 save_match(hwif, h, &match);
1075 } 1034 }
1076 } 1035 }
1077 } 1036 }
@@ -1092,17 +1051,19 @@ static int init_irq (ide_hwif_t *hwif)
1092 * linked list, the first entry is the hwif that owns 1051 * linked list, the first entry is the hwif that owns
1093 * hwgroup->handler - do not change that. 1052 * hwgroup->handler - do not change that.
1094 */ 1053 */
1095 spin_lock_irq(&ide_lock); 1054 spin_lock_irq(&hwgroup->lock);
1096 hwif->next = hwgroup->hwif->next; 1055 hwif->next = hwgroup->hwif->next;
1097 hwgroup->hwif->next = hwif; 1056 hwgroup->hwif->next = hwif;
1098 BUG_ON(hwif->next == hwif); 1057 BUG_ON(hwif->next == hwif);
1099 spin_unlock_irq(&ide_lock); 1058 spin_unlock_irq(&hwgroup->lock);
1100 } else { 1059 } else {
1101 hwgroup = kmalloc_node(sizeof(*hwgroup), GFP_KERNEL|__GFP_ZERO, 1060 hwgroup = kmalloc_node(sizeof(*hwgroup), GFP_KERNEL|__GFP_ZERO,
1102 hwif_to_node(hwif)); 1061 hwif_to_node(hwif));
1103 if (hwgroup == NULL) 1062 if (hwgroup == NULL)
1104 goto out_up; 1063 goto out_up;
1105 1064
1065 spin_lock_init(&hwgroup->lock);
1066
1106 hwif->hwgroup = hwgroup; 1067 hwif->hwgroup = hwgroup;
1107 hwgroup->hwif = hwif->next = hwif; 1068 hwgroup->hwif = hwif->next = hwif;
1108 1069
@@ -1122,8 +1083,7 @@ static int init_irq (ide_hwif_t *hwif)
1122 sa = IRQF_SHARED; 1083 sa = IRQF_SHARED;
1123#endif /* __mc68000__ */ 1084#endif /* __mc68000__ */
1124 1085
1125 if (hwif->chipset == ide_pci || hwif->chipset == ide_cmd646 || 1086 if (hwif->chipset == ide_pci)
1126 hwif->chipset == ide_ali14xx)
1127 sa = IRQF_SHARED; 1087 sa = IRQF_SHARED;
1128 1088
1129 if (io_ports->ctl_addr) 1089 if (io_ports->ctl_addr)
@@ -1150,8 +1110,7 @@ static int init_irq (ide_hwif_t *hwif)
1150 io_ports->data_addr, hwif->irq); 1110 io_ports->data_addr, hwif->irq);
1151#endif /* __mc68000__ */ 1111#endif /* __mc68000__ */
1152 if (match) 1112 if (match)
1153 printk(KERN_CONT " (%sed with %s)", 1113 printk(KERN_CONT " (serialized with %s)", match->name);
1154 hwif->sharing_irq ? "shar" : "serializ", match->name);
1155 printk(KERN_CONT "\n"); 1114 printk(KERN_CONT "\n");
1156 1115
1157 mutex_unlock(&ide_cfg_mtx); 1116 mutex_unlock(&ide_cfg_mtx);
@@ -1263,20 +1222,21 @@ static void ide_remove_drive_from_hwgroup(ide_drive_t *drive)
1263static void drive_release_dev (struct device *dev) 1222static void drive_release_dev (struct device *dev)
1264{ 1223{
1265 ide_drive_t *drive = container_of(dev, ide_drive_t, gendev); 1224 ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
1225 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
1266 1226
1267 ide_proc_unregister_device(drive); 1227 ide_proc_unregister_device(drive);
1268 1228
1269 spin_lock_irq(&ide_lock); 1229 spin_lock_irq(&hwgroup->lock);
1270 ide_remove_drive_from_hwgroup(drive); 1230 ide_remove_drive_from_hwgroup(drive);
1271 kfree(drive->id); 1231 kfree(drive->id);
1272 drive->id = NULL; 1232 drive->id = NULL;
1273 drive->dev_flags &= ~IDE_DFLAG_PRESENT; 1233 drive->dev_flags &= ~IDE_DFLAG_PRESENT;
1274 /* Messed up locking ... */ 1234 /* Messed up locking ... */
1275 spin_unlock_irq(&ide_lock); 1235 spin_unlock_irq(&hwgroup->lock);
1276 blk_cleanup_queue(drive->queue); 1236 blk_cleanup_queue(drive->queue);
1277 spin_lock_irq(&ide_lock); 1237 spin_lock_irq(&hwgroup->lock);
1278 drive->queue = NULL; 1238 drive->queue = NULL;
1279 spin_unlock_irq(&ide_lock); 1239 spin_unlock_irq(&hwgroup->lock);
1280 1240
1281 complete(&drive->gendev_rel_comp); 1241 complete(&drive->gendev_rel_comp);
1282} 1242}
@@ -1352,7 +1312,7 @@ static void hwif_register_devices(ide_hwif_t *hwif)
1352 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 1312 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
1353 continue; 1313 continue;
1354 1314
1355 snprintf(dev->bus_id, BUS_ID_SIZE, "%u.%u", hwif->index, i); 1315 dev_set_name(dev, "%u.%u", hwif->index, i);
1356 dev->parent = &hwif->gendev; 1316 dev->parent = &hwif->gendev;
1357 dev->bus = &ide_bus_type; 1317 dev->bus = &ide_bus_type;
1358 dev->driver_data = drive; 1318 dev->driver_data = drive;
@@ -1436,13 +1396,11 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1436 } 1396 }
1437 1397
1438 if ((d->host_flags & IDE_HFLAG_SERIALIZE) || 1398 if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
1439 ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) { 1399 ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
1440 if (hwif->mate) 1400 hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
1441 hwif->mate->serialized = hwif->serialized = 1;
1442 }
1443 1401
1444 if (d->host_flags & IDE_HFLAG_RQSIZE_256) 1402 if (d->max_sectors)
1445 hwif->rqsize = 256; 1403 hwif->rqsize = d->max_sectors;
1446 1404
1447 /* call chipset specific routine for each enabled port */ 1405 /* call chipset specific routine for each enabled port */
1448 if (d->init_hwif) 1406 if (d->init_hwif)
@@ -1794,59 +1752,3 @@ void ide_port_scan(ide_hwif_t *hwif)
1794 ide_proc_port_register_devices(hwif); 1752 ide_proc_port_register_devices(hwif);
1795} 1753}
1796EXPORT_SYMBOL_GPL(ide_port_scan); 1754EXPORT_SYMBOL_GPL(ide_port_scan);
1797
1798static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
1799 u8 port_no, const struct ide_port_info *d,
1800 unsigned long config)
1801{
1802 unsigned long base, ctl;
1803 int irq;
1804
1805 if (port_no == 0) {
1806 base = 0x1f0;
1807 ctl = 0x3f6;
1808 irq = 14;
1809 } else {
1810 base = 0x170;
1811 ctl = 0x376;
1812 irq = 15;
1813 }
1814
1815 if (!request_region(base, 8, d->name)) {
1816 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
1817 d->name, base, base + 7);
1818 return;
1819 }
1820
1821 if (!request_region(ctl, 1, d->name)) {
1822 printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
1823 d->name, ctl);
1824 release_region(base, 8);
1825 return;
1826 }
1827
1828 ide_std_init_ports(hw, base, ctl);
1829 hw->irq = irq;
1830 hw->chipset = d->chipset;
1831 hw->config = config;
1832
1833 hws[port_no] = hw;
1834}
1835
1836int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
1837{
1838 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
1839
1840 memset(&hw, 0, sizeof(hw));
1841
1842 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
1843 ide_legacy_init_one(hws, &hw[0], 0, d, config);
1844 ide_legacy_init_one(hws, &hw[1], 1, d, config);
1845
1846 if (hws[0] == NULL && hws[1] == NULL &&
1847 (d->host_flags & IDE_HFLAG_SINGLE))
1848 return -ENOENT;
1849
1850 return ide_host_add(d, hws, NULL);
1851}
1852EXPORT_SYMBOL_GPL(ide_legacy_device_add);
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index f3cddd1b2f8..a14e2938e4f 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -46,10 +46,6 @@ static int proc_ide_read_imodel
46 case ide_qd65xx: name = "qd65xx"; break; 46 case ide_qd65xx: name = "qd65xx"; break;
47 case ide_umc8672: name = "umc8672"; break; 47 case ide_umc8672: name = "umc8672"; break;
48 case ide_ht6560b: name = "ht6560b"; break; 48 case ide_ht6560b: name = "ht6560b"; break;
49 case ide_rz1000: name = "rz1000"; break;
50 case ide_trm290: name = "trm290"; break;
51 case ide_cmd646: name = "cmd646"; break;
52 case ide_cy82c693: name = "cy82c693"; break;
53 case ide_4drives: name = "4drives"; break; 49 case ide_4drives: name = "4drives"; break;
54 case ide_pmac: name = "mac-io"; break; 50 case ide_pmac: name = "mac-io"; break;
55 case ide_au1xxx: name = "au1xxx"; break; 51 case ide_au1xxx: name = "au1xxx"; break;
@@ -155,13 +151,8 @@ static int ide_read_setting(ide_drive_t *drive,
155 const struct ide_devset *ds = setting->setting; 151 const struct ide_devset *ds = setting->setting;
156 int val = -EINVAL; 152 int val = -EINVAL;
157 153
158 if (ds->get) { 154 if (ds->get)
159 unsigned long flags;
160
161 spin_lock_irqsave(&ide_lock, flags);
162 val = ds->get(drive); 155 val = ds->get(drive);
163 spin_unlock_irqrestore(&ide_lock, flags);
164 }
165 156
166 return val; 157 return val;
167} 158}
@@ -583,31 +574,19 @@ EXPORT_SYMBOL(ide_proc_register_driver);
583 * Clean up the driver specific /proc files and IDE settings 574 * Clean up the driver specific /proc files and IDE settings
584 * for a given drive. 575 * for a given drive.
585 * 576 *
586 * Takes ide_setting_mtx and ide_lock. 577 * Takes ide_setting_mtx.
587 * Caller must hold none of the locks.
588 */ 578 */
589 579
590void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) 580void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
591{ 581{
592 unsigned long flags;
593
594 ide_remove_proc_entries(drive->proc, driver->proc_entries(drive)); 582 ide_remove_proc_entries(drive->proc, driver->proc_entries(drive));
595 583
596 mutex_lock(&ide_setting_mtx); 584 mutex_lock(&ide_setting_mtx);
597 spin_lock_irqsave(&ide_lock, flags);
598 /* 585 /*
599 * ide_setting_mtx protects the settings list 586 * ide_setting_mtx protects both the settings list and the use
600 * ide_lock protects the use of settings 587 * of settings (we cannot take a setting out that is being used).
601 *
602 * so we need to hold both, ide_settings_sem because we want to
603 * modify the settings list, and ide_lock because we cannot take
604 * a setting out that is being used.
605 *
606 * OTOH both ide_{read,write}_setting are only ever used under
607 * ide_setting_mtx.
608 */ 588 */
609 drive->settings = NULL; 589 drive->settings = NULL;
610 spin_unlock_irqrestore(&ide_lock, flags);
611 mutex_unlock(&ide_setting_mtx); 590 mutex_unlock(&ide_setting_mtx);
612} 591}
613EXPORT_SYMBOL(ide_proc_unregister_driver); 592EXPORT_SYMBOL(ide_proc_unregister_driver);
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 04f8f13cb9d..f0f09f702e9 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -74,9 +74,6 @@ static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR,
74 74
75DEFINE_MUTEX(ide_cfg_mtx); 75DEFINE_MUTEX(ide_cfg_mtx);
76 76
77__cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
78EXPORT_SYMBOL(ide_lock);
79
80static void ide_port_init_devices_data(ide_hwif_t *); 77static void ide_port_init_devices_data(ide_hwif_t *);
81 78
82/* 79/*
@@ -130,7 +127,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
130 } 127 }
131} 128}
132 129
133/* Called with ide_lock held. */
134static void __ide_port_unregister_devices(ide_hwif_t *hwif) 130static void __ide_port_unregister_devices(ide_hwif_t *hwif)
135{ 131{
136 int i; 132 int i;
@@ -139,10 +135,8 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif)
139 ide_drive_t *drive = &hwif->drives[i]; 135 ide_drive_t *drive = &hwif->drives[i];
140 136
141 if (drive->dev_flags & IDE_DFLAG_PRESENT) { 137 if (drive->dev_flags & IDE_DFLAG_PRESENT) {
142 spin_unlock_irq(&ide_lock);
143 device_unregister(&drive->gendev); 138 device_unregister(&drive->gendev);
144 wait_for_completion(&drive->gendev_rel_comp); 139 wait_for_completion(&drive->gendev_rel_comp);
145 spin_lock_irq(&ide_lock);
146 } 140 }
147 } 141 }
148} 142}
@@ -150,11 +144,9 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif)
150void ide_port_unregister_devices(ide_hwif_t *hwif) 144void ide_port_unregister_devices(ide_hwif_t *hwif)
151{ 145{
152 mutex_lock(&ide_cfg_mtx); 146 mutex_lock(&ide_cfg_mtx);
153 spin_lock_irq(&ide_lock);
154 __ide_port_unregister_devices(hwif); 147 __ide_port_unregister_devices(hwif);
155 hwif->present = 0; 148 hwif->present = 0;
156 ide_port_init_devices_data(hwif); 149 ide_port_init_devices_data(hwif);
157 spin_unlock_irq(&ide_lock);
158 mutex_unlock(&ide_cfg_mtx); 150 mutex_unlock(&ide_cfg_mtx);
159} 151}
160EXPORT_SYMBOL_GPL(ide_port_unregister_devices); 152EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
@@ -192,12 +184,10 @@ void ide_unregister(ide_hwif_t *hwif)
192 184
193 mutex_lock(&ide_cfg_mtx); 185 mutex_lock(&ide_cfg_mtx);
194 186
195 spin_lock_irq(&ide_lock);
196 if (hwif->present) { 187 if (hwif->present) {
197 __ide_port_unregister_devices(hwif); 188 __ide_port_unregister_devices(hwif);
198 hwif->present = 0; 189 hwif->present = 0;
199 } 190 }
200 spin_unlock_irq(&ide_lock);
201 191
202 ide_proc_unregister_port(hwif); 192 ide_proc_unregister_port(hwif);
203 193
@@ -340,6 +330,7 @@ static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
340static int set_pio_mode(ide_drive_t *drive, int arg) 330static int set_pio_mode(ide_drive_t *drive, int arg)
341{ 331{
342 ide_hwif_t *hwif = drive->hwif; 332 ide_hwif_t *hwif = drive->hwif;
333 ide_hwgroup_t *hwgroup = hwif->hwgroup;
343 const struct ide_port_ops *port_ops = hwif->port_ops; 334 const struct ide_port_ops *port_ops = hwif->port_ops;
344 335
345 if (arg < 0 || arg > 255) 336 if (arg < 0 || arg > 255)
@@ -354,9 +345,9 @@ static int set_pio_mode(ide_drive_t *drive, int arg)
354 unsigned long flags; 345 unsigned long flags;
355 346
356 /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */ 347 /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
357 spin_lock_irqsave(&ide_lock, flags); 348 spin_lock_irqsave(&hwgroup->lock, flags);
358 port_ops->set_pio_mode(drive, arg); 349 port_ops->set_pio_mode(drive, arg);
359 spin_unlock_irqrestore(&ide_lock, flags); 350 spin_unlock_irqrestore(&hwgroup->lock, flags);
360 } else 351 } else
361 port_ops->set_pio_mode(drive, arg); 352 port_ops->set_pio_mode(drive, arg);
362 } else { 353 } else {
@@ -397,80 +388,6 @@ ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
397ide_ext_devset_rw_sync(using_dma, using_dma); 388ide_ext_devset_rw_sync(using_dma, using_dma);
398__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode); 389__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
399 390
400static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
401{
402 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
403 ide_hwif_t *hwif = HWIF(drive);
404 struct request *rq;
405 struct request_pm_state rqpm;
406 ide_task_t args;
407 int ret;
408
409 /* call ACPI _GTM only once */
410 if ((drive->dn & 1) == 0 || pair == NULL)
411 ide_acpi_get_timing(hwif);
412
413 memset(&rqpm, 0, sizeof(rqpm));
414 memset(&args, 0, sizeof(args));
415 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
416 rq->cmd_type = REQ_TYPE_PM_SUSPEND;
417 rq->special = &args;
418 rq->data = &rqpm;
419 rqpm.pm_step = IDE_PM_START_SUSPEND;
420 if (mesg.event == PM_EVENT_PRETHAW)
421 mesg.event = PM_EVENT_FREEZE;
422 rqpm.pm_state = mesg.event;
423
424 ret = blk_execute_rq(drive->queue, NULL, rq, 0);
425 blk_put_request(rq);
426
427 /* call ACPI _PS3 only after both devices are suspended */
428 if (ret == 0 && ((drive->dn & 1) || pair == NULL))
429 ide_acpi_set_state(hwif, 0);
430
431 return ret;
432}
433
434static int generic_ide_resume(struct device *dev)
435{
436 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
437 ide_hwif_t *hwif = HWIF(drive);
438 struct request *rq;
439 struct request_pm_state rqpm;
440 ide_task_t args;
441 int err;
442
443 /* call ACPI _PS0 / _STM only once */
444 if ((drive->dn & 1) == 0 || pair == NULL) {
445 ide_acpi_set_state(hwif, 1);
446 ide_acpi_push_timing(hwif);
447 }
448
449 ide_acpi_exec_tfs(drive);
450
451 memset(&rqpm, 0, sizeof(rqpm));
452 memset(&args, 0, sizeof(args));
453 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
454 rq->cmd_type = REQ_TYPE_PM_RESUME;
455 rq->cmd_flags |= REQ_PREEMPT;
456 rq->special = &args;
457 rq->data = &rqpm;
458 rqpm.pm_step = IDE_PM_START_RESUME;
459 rqpm.pm_state = PM_EVENT_ON;
460
461 err = blk_execute_rq(drive->queue, NULL, rq, 1);
462 blk_put_request(rq);
463
464 if (err == 0 && dev->driver) {
465 ide_driver_t *drv = to_ide_driver(dev->driver);
466
467 if (drv->resume)
468 drv->resume(drive);
469 }
470
471 return err;
472}
473
474/** 391/**
475 * ide_device_get - get an additional reference to a ide_drive_t 392 * ide_device_get - get an additional reference to a ide_drive_t
476 * @drive: device to get a reference to 393 * @drive: device to get a reference to
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 799557c25ee..624e62e5cc9 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -350,16 +350,17 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
350 .dma_timeout = pdc202xx_dma_timeout, 350 .dma_timeout = pdc202xx_dma_timeout,
351}; 351};
352 352
353#define DECLARE_PDC2026X_DEV(udma, extra_flags) \ 353#define DECLARE_PDC2026X_DEV(udma, sectors) \
354 { \ 354 { \
355 .name = DRV_NAME, \ 355 .name = DRV_NAME, \
356 .init_chipset = init_chipset_pdc202xx, \ 356 .init_chipset = init_chipset_pdc202xx, \
357 .port_ops = &pdc2026x_port_ops, \ 357 .port_ops = &pdc2026x_port_ops, \
358 .dma_ops = &pdc2026x_dma_ops, \ 358 .dma_ops = &pdc2026x_dma_ops, \
359 .host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \ 359 .host_flags = IDE_HFLAGS_PDC202XX, \
360 .pio_mask = ATA_PIO4, \ 360 .pio_mask = ATA_PIO4, \
361 .mwdma_mask = ATA_MWDMA2, \ 361 .mwdma_mask = ATA_MWDMA2, \
362 .udma_mask = udma, \ 362 .udma_mask = udma, \
363 .max_sectors = sectors, \
363 } 364 }
364 365
365static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { 366static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
@@ -376,8 +377,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
376 377
377 /* 1: PDC2026{2,3} */ 378 /* 1: PDC2026{2,3} */
378 DECLARE_PDC2026X_DEV(ATA_UDMA4, 0), 379 DECLARE_PDC2026X_DEV(ATA_UDMA4, 0),
379 /* 2: PDC2026{5,7} */ 380 /* 2: PDC2026{5,7}: UDMA5, limit LBA48 requests to 256 sectors */
380 DECLARE_PDC2026X_DEV(ATA_UDMA5, IDE_HFLAG_RQSIZE_256), 381 DECLARE_PDC2026X_DEV(ATA_UDMA5, 256),
381}; 382};
382 383
383/** 384/**
diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
index 7daf0135cba..a6414a884eb 100644
--- a/drivers/ide/rz1000.c
+++ b/drivers/ide/rz1000.c
@@ -22,34 +22,48 @@
22 22
23#define DRV_NAME "rz1000" 23#define DRV_NAME "rz1000"
24 24
25static void __devinit init_hwif_rz1000 (ide_hwif_t *hwif) 25static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
26{ 26{
27 struct pci_dev *dev = to_pci_dev(hwif->dev);
28 u16 reg; 27 u16 reg;
29 28
30 if (!pci_read_config_word (dev, 0x40, &reg) && 29 if (!pci_read_config_word (dev, 0x40, &reg) &&
31 !pci_write_config_word(dev, 0x40, reg & 0xdfff)) { 30 !pci_write_config_word(dev, 0x40, reg & 0xdfff)) {
32 printk(KERN_INFO "%s: disabled chipset read-ahead " 31 printk(KERN_INFO "%s: disabled chipset read-ahead "
33 "(buggy RZ1000/RZ1001)\n", hwif->name); 32 "(buggy RZ1000/RZ1001)\n", pci_name(dev));
33 return 0;
34 } else { 34 } else {
35 if (hwif->mate)
36 hwif->mate->serialized = hwif->serialized = 1;
37 hwif->host_flags |= IDE_HFLAG_NO_UNMASK_IRQS;
38 printk(KERN_INFO "%s: serialized, disabled unmasking " 35 printk(KERN_INFO "%s: serialized, disabled unmasking "
39 "(buggy RZ1000/RZ1001)\n", hwif->name); 36 "(buggy RZ1000/RZ1001)\n", pci_name(dev));
37 return 1;
40 } 38 }
41} 39}
42 40
43static const struct ide_port_info rz1000_chipset __devinitdata = { 41static const struct ide_port_info rz1000_chipset __devinitdata = {
44 .name = DRV_NAME, 42 .name = DRV_NAME,
45 .init_hwif = init_hwif_rz1000,
46 .chipset = ide_rz1000,
47 .host_flags = IDE_HFLAG_NO_DMA, 43 .host_flags = IDE_HFLAG_NO_DMA,
48}; 44};
49 45
50static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id) 46static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
51{ 47{
52 return ide_pci_init_one(dev, &rz1000_chipset, NULL); 48 struct ide_port_info d = rz1000_chipset;
49 int rc;
50
51 rc = pci_enable_device(dev);
52 if (rc)
53 return rc;
54
55 if (rz1000_disable_readahead(dev)) {
56 d.host_flags |= IDE_HFLAG_SERIALIZE;
57 d.host_flags |= IDE_HFLAG_NO_UNMASK_IRQS;
58 }
59
60 return ide_pci_init_one(dev, &d, NULL);
61}
62
63static void rz1000_remove(struct pci_dev *dev)
64{
65 ide_pci_remove(dev);
66 pci_disable_device(dev);
53} 67}
54 68
55static const struct pci_device_id rz1000_pci_tbl[] = { 69static const struct pci_device_id rz1000_pci_tbl[] = {
@@ -63,7 +77,7 @@ static struct pci_driver rz1000_pci_driver = {
63 .name = "RZ1000_IDE", 77 .name = "RZ1000_IDE",
64 .id_table = rz1000_pci_tbl, 78 .id_table = rz1000_pci_tbl,
65 .probe = rz1000_init_one, 79 .probe = rz1000_init_one,
66 .remove = ide_pci_remove, 80 .remove = rz1000_remove,
67}; 81};
68 82
69static int __init rz1000_ide_init(void) 83static int __init rz1000_ide_init(void)
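The read-ahead disable above comes down to clearing one bit in PCI config word 0x40: the 0xdfff mask drops bit 13 and leaves the rest of the register alone. A tiny standalone check of the mask arithmetic (userspace, no PCI access, illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned short reg = 0xffff;            /* pretend every bit was set */
            unsigned short cleared = reg & 0xdfff;

            printf("bits dropped: 0x%04x\n", reg ^ cleared);  /* 0x2000, i.e. bit 13 */
            return 0;
    }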
diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
index 75ea6152656..2a5ea90cf8b 100644
--- a/drivers/ide/trm290.c
+++ b/drivers/ide/trm290.c
@@ -328,10 +328,10 @@ static struct ide_dma_ops trm290_dma_ops = {
328static const struct ide_port_info trm290_chipset __devinitdata = { 328static const struct ide_port_info trm290_chipset __devinitdata = {
329 .name = DRV_NAME, 329 .name = DRV_NAME,
330 .init_hwif = init_hwif_trm290, 330 .init_hwif = init_hwif_trm290,
331 .chipset = ide_trm290,
332 .port_ops = &trm290_port_ops, 331 .port_ops = &trm290_port_ops,
333 .dma_ops = &trm290_dma_ops, 332 .dma_ops = &trm290_dma_ops,
334 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 333 .host_flags = IDE_HFLAG_TRM290 |
334 IDE_HFLAG_NO_ATAPI_DMA |
335#if 0 /* play it safe for now */ 335#if 0 /* play it safe for now */
336 IDE_HFLAG_TRUST_BIOS_FOR_DMA | 336 IDE_HFLAG_TRUST_BIOS_FOR_DMA |
337#endif 337#endif
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index 9120063e8f8..13b63e7fa35 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -181,7 +181,7 @@ static void tx4938ide_input_data_swap(ide_drive_t *drive, struct request *rq,
181 181
182 while (count--) 182 while (count--)
183 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port)); 183 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
184 __ide_flush_dcache_range((unsigned long)buf, count * 2); 184 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
185} 185}
186 186
187static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq, 187static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq,
@@ -195,7 +195,7 @@ static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq,
195 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port); 195 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
196 ptr++; 196 ptr++;
197 } 197 }
198 __ide_flush_dcache_range((unsigned long)buf, count * 2); 198 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
199} 199}
200 200
201static const struct ide_tp_ops tx4938ide_tp_ops = { 201static const struct ide_tp_ops tx4938ide_tp_ops = {
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index bafb7d1a22e..97cd9e0f66f 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -259,6 +259,12 @@ static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq)
259 bcount = 0x10000 - (cur_addr & 0xffff); 259 bcount = 0x10000 - (cur_addr & 0xffff);
260 if (bcount > cur_len) 260 if (bcount > cur_len)
261 bcount = cur_len; 261 bcount = cur_len;
262 /*
263 * This workaround for zero count seems required.
264 * (standard ide_build_dmatable do it too)
265 */
266 if ((bcount & 0xffff) == 0x0000)
267 bcount = 0x8000;
262 *table++ = bcount & 0xffff; 268 *table++ = bcount & 0xffff;
263 *table++ = cur_addr; 269 *table++ = cur_addr;
264 cur_addr += bcount; 270 cur_addr += bcount;
@@ -558,7 +564,7 @@ static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq,
558 564
559 while (count--) 565 while (count--)
560 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port)); 566 *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
561 __ide_flush_dcache_range((unsigned long)buf, count * 2); 567 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
562} 568}
563 569
564static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq, 570static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq,
@@ -572,7 +578,7 @@ static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq,
572 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port); 578 __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
573 ptr++; 579 ptr++;
574 } 580 }
575 __ide_flush_dcache_range((unsigned long)buf, count * 2); 581 __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
576} 582}
577 583
578static const struct ide_tp_ops tx4939ide_tp_ops = { 584static const struct ide_tp_ops tx4939ide_tp_ops = {
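Two numeric details in the tx4939ide hunks above (the tx4938ide hunks get the same flush fix) are easy to miss. First, a segment that starts exactly on a 64 KiB boundary gives bcount == 0x10000, which a 16-bit count field would record as 0, hence the halving to 0x8000. Second, the dcache flush length becomes roundup(len, 2) because the word-copy loop has already post-decremented count past zero, so the old count * 2 was meaningless. A standalone arithmetic check (userspace, illustrative; the derivation of count from len is an assumption, it is not shown in the hunk):

    #include <stdio.h>

    #define ROUNDUP2(x)     (((x) + 1u) & ~1u)     /* stand-in for roundup(len, 2) */

    int main(void)
    {
            /* segment starting on a 64 KiB boundary */
            unsigned int cur_addr = 0x20000, cur_len = 0x18000;
            unsigned int bcount = 0x10000 - (cur_addr & 0xffff);

            if (bcount > cur_len)
                    bcount = cur_len;
            if ((bcount & 0xffff) == 0x0000)        /* 0x10000 would be stored as 0 */
                    bcount = 0x8000;
            printf("bcount = 0x%x\n", bcount);      /* prints 0x8000 */

            /* flush length for an odd-sized PIO transfer */
            unsigned int len = 513;
            unsigned int count = (len + 1) / 2;     /* assumed; matches the word copy loop */
            while (count--)
                    ;                               /* loop leaves count == UINT_MAX */
            printf("old: %u  new: %u\n", count * 2, ROUNDUP2(len));
            return 0;
    }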
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 1da076e0c91..e29978cf619 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -107,18 +107,21 @@ static void umc_set_speeds(u8 speeds[])
107static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio) 107static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
108{ 108{
109 ide_hwif_t *hwif = drive->hwif; 109 ide_hwif_t *hwif = drive->hwif;
110 unsigned long flags; 110 ide_hwgroup_t *mate_hwgroup = hwif->mate ? hwif->mate->hwgroup : NULL;
111 unsigned long uninitialized_var(flags);
111 112
112 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", 113 printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
113 drive->name, pio, pio_to_umc[pio]); 114 drive->name, pio, pio_to_umc[pio]);
114 spin_lock_irqsave(&ide_lock, flags); 115 if (mate_hwgroup)
115 if (hwif->mate && hwif->mate->hwgroup->handler) { 116 spin_lock_irqsave(&mate_hwgroup->lock, flags);
117 if (mate_hwgroup && mate_hwgroup->handler) {
116 printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n"); 118 printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
117 } else { 119 } else {
118 current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio]; 120 current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
119 umc_set_speeds(current_speeds); 121 umc_set_speeds(current_speeds);
120 } 122 }
121 spin_unlock_irqrestore(&ide_lock, flags); 123 if (mate_hwgroup)
124 spin_unlock_irqrestore(&mate_hwgroup->lock, flags);
122} 125}
123 126
124static const struct ide_port_ops umc8672_port_ops = { 127static const struct ide_port_ops umc8672_port_ops = {
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 2370fd82ebf..c24140aff8e 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -578,6 +578,8 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
578{ 578{
579 idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host); 579 idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
580 ide_drive_t *drive = scsi->drive; 580 ide_drive_t *drive = scsi->drive;
581 ide_hwif_t *hwif;
582 ide_hwgroup_t *hwgroup;
581 int busy; 583 int busy;
582 int ret = FAILED; 584 int ret = FAILED;
583 585
@@ -594,13 +596,16 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
594 goto no_drive; 596 goto no_drive;
595 } 597 }
596 598
597 /* First give it some more time, how much is "right" is hard to say :-( */ 599 hwif = drive->hwif;
600 hwgroup = hwif->hwgroup;
598 601
599 busy = ide_wait_not_busy(HWIF(drive), 100); /* FIXME - uses mdelay which causes latency? */ 602 /* First give it some more time, how much is "right" is hard to say :-(
603 FIXME - uses mdelay which causes latency? */
604 busy = ide_wait_not_busy(hwif, 100);
600 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 605 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
601 printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":""); 606 printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":"");
602 607
603 spin_lock_irq(&ide_lock); 608 spin_lock_irq(&hwgroup->lock);
604 609
605 /* If there is no pc running we're done (our interrupt took care of it) */ 610 /* If there is no pc running we're done (our interrupt took care of it) */
606 pc = drive->pc; 611 pc = drive->pc;
@@ -629,7 +634,7 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
629 } 634 }
630 635
631ide_unlock: 636ide_unlock:
632 spin_unlock_irq(&ide_lock); 637 spin_unlock_irq(&hwgroup->lock);
633no_drive: 638no_drive:
634 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 639 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
635 printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed"); 640 printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed");
@@ -642,6 +647,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
642 struct request *req; 647 struct request *req;
643 idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host); 648 idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
644 ide_drive_t *drive = scsi->drive; 649 ide_drive_t *drive = scsi->drive;
650 ide_hwgroup_t *hwgroup;
645 int ready = 0; 651 int ready = 0;
646 int ret = SUCCESS; 652 int ret = SUCCESS;
647 653
@@ -658,14 +664,18 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
658 return FAILED; 664 return FAILED;
659 } 665 }
660 666
667 hwgroup = drive->hwif->hwgroup;
668
661 spin_lock_irq(cmd->device->host->host_lock); 669 spin_lock_irq(cmd->device->host->host_lock);
662 spin_lock(&ide_lock); 670 spin_lock(&hwgroup->lock);
663 671
664 pc = drive->pc; 672 pc = drive->pc;
673 if (pc)
674 req = pc->rq;
665 675
666 if (pc == NULL || (req = pc->rq) != HWGROUP(drive)->rq || !HWGROUP(drive)->handler) { 676 if (pc == NULL || req != hwgroup->rq || hwgroup->handler == NULL) {
667 printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n"); 677 printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n");
668 spin_unlock(&ide_lock); 678 spin_unlock(&hwgroup->lock);
669 spin_unlock_irq(cmd->device->host->host_lock); 679 spin_unlock_irq(cmd->device->host->host_lock);
670 return FAILED; 680 return FAILED;
671 } 681 }
@@ -685,10 +695,10 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
685 BUG(); 695 BUG();
686 } 696 }
687 697
688 HWGROUP(drive)->rq = NULL; 698 hwgroup->rq = NULL;
689 HWGROUP(drive)->handler = NULL; 699 hwgroup->handler = NULL;
690 HWGROUP(drive)->busy = 1; /* will set this to zero when ide reset finished */ 700 hwgroup->busy = 1; /* will set this to zero when ide reset finished */
691 spin_unlock(&ide_lock); 701 spin_unlock(&hwgroup->lock);
692 702
693 ide_do_reset(drive); 703 ide_do_reset(drive);
694 704
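
Both ide-scsi error handlers now take the per-hwgroup lock instead of ide_lock before touching hwgroup->rq, hwgroup->handler and hwgroup->busy. A minimal sketch of that pattern, using only fields shown in this patch (the function itself is illustrative, not driver code):

	/* serialize an EH-side check against the IDE IRQ handler */
	static int example_hwgroup_has_handler(ide_drive_t *drive)
	{
		ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
		int active;

		spin_lock_irq(&hwgroup->lock);
		active = (hwgroup->handler != NULL);
		spin_unlock_irq(&hwgroup->lock);

		return active;
	}
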
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 010fb26a157..e99c56de7f5 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -122,8 +122,6 @@ struct ide_io_ports {
122#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ 122#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
123#define SECTOR_SIZE 512 123#define SECTOR_SIZE 512
124 124
125#define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
126
127/* 125/*
128 * Timeouts for various operations: 126 * Timeouts for various operations:
129 */ 127 */
@@ -172,9 +170,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *);
172enum { ide_unknown, ide_generic, ide_pci, 170enum { ide_unknown, ide_generic, ide_pci,
173 ide_cmd640, ide_dtc2278, ide_ali14xx, 171 ide_cmd640, ide_dtc2278, ide_ali14xx,
174 ide_qd65xx, ide_umc8672, ide_ht6560b, 172 ide_qd65xx, ide_umc8672, ide_ht6560b,
175 ide_rz1000, ide_trm290, 173 ide_4drives, ide_pmac, ide_acorn,
176 ide_cmd646, ide_cy82c693, ide_4drives,
177 ide_pmac, ide_acorn,
178 ide_au1xxx, ide_palm3710 174 ide_au1xxx, ide_palm3710
179}; 175};
180 176
@@ -496,8 +492,6 @@ enum {
496 * when more than one interrupt is needed. 492 * when more than one interrupt is needed.
497 */ 493 */
498 IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), 494 IDE_AFLAG_LIMIT_NFRAMES = (1 << 7),
499 /* Seeking in progress. */
500 IDE_AFLAG_SEEKING = (1 << 8),
501 /* Saved TOC information is current. */ 495 /* Saved TOC information is current. */
502 IDE_AFLAG_TOC_VALID = (1 << 9), 496 IDE_AFLAG_TOC_VALID = (1 << 9),
503 /* We think that the drive door is locked. */ 497 /* We think that the drive door is locked. */
@@ -845,8 +839,6 @@ typedef struct hwif_s {
845 unsigned extra_ports; /* number of extra dma ports */ 839 unsigned extra_ports; /* number of extra dma ports */
846 840
847 unsigned present : 1; /* this interface exists */ 841 unsigned present : 1; /* this interface exists */
848 unsigned serialized : 1; /* serialized all channel operation */
849 unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
850 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ 842 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
851 843
852 struct device gendev; 844 struct device gendev;
@@ -909,6 +901,8 @@ typedef struct hwgroup_s {
909 901
910 int req_gen; 902 int req_gen;
911 int req_gen_timer; 903 int req_gen_timer;
904
905 spinlock_t lock;
912} ide_hwgroup_t; 906} ide_hwgroup_t;
913 907
914typedef struct ide_driver_s ide_driver_t; 908typedef struct ide_driver_s ide_driver_t;
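
The new spinlock_t member must be initialized before the port group can service requests or interrupts; a minimal sketch (the real initialization lives in the probe code, outside this excerpt, so the helper below is purely illustrative):

	static void example_init_hwgroup(ide_hwgroup_t *hwgroup)
	{
		spin_lock_init(&hwgroup->lock);	/* before any request or IRQ uses it */
	}
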
@@ -1122,6 +1116,14 @@ enum {
1122 IDE_PM_COMPLETED, 1116 IDE_PM_COMPLETED,
1123}; 1117};
1124 1118
1119int generic_ide_suspend(struct device *, pm_message_t);
1120int generic_ide_resume(struct device *);
1121
1122void ide_complete_power_step(ide_drive_t *, struct request *);
1123ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
1124void ide_complete_pm_request(ide_drive_t *, struct request *);
1125void ide_check_pm_state(ide_drive_t *, struct request *);
1126
1125/* 1127/*
1126 * Subdrivers support. 1128 * Subdrivers support.
1127 * 1129 *
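
generic_ide_suspend() and generic_ide_resume() are declared here so the driver-model hooks can reach them; a hedged sketch of the kind of wiring this enables (the bus name and the exact location of the real hookup are assumptions — the actual code is in drivers/ide/ide.c, which is not shown in this excerpt):

	static struct bus_type example_ide_bus = {
		.name		= "ide",		/* assumed */
		.suspend	= generic_ide_suspend,
		.resume		= generic_ide_resume,
	};
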
@@ -1376,8 +1378,8 @@ enum {
1376 IDE_HFLAG_LEGACY_IRQS = (1 << 21), 1378 IDE_HFLAG_LEGACY_IRQS = (1 << 21),
1377 /* force use of legacy IRQs */ 1379 /* force use of legacy IRQs */
1378 IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22), 1380 IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22),
1379 /* limit LBA48 requests to 256 sectors */ 1381 /* host is TRM290 */
1380 IDE_HFLAG_RQSIZE_256 = (1 << 23), 1382 IDE_HFLAG_TRM290 = (1 << 23),
1381 /* use 32-bit I/O ops */ 1383 /* use 32-bit I/O ops */
1382 IDE_HFLAG_IO_32BIT = (1 << 24), 1384 IDE_HFLAG_IO_32BIT = (1 << 24),
1383 /* unmask IRQs */ 1385 /* unmask IRQs */
@@ -1415,6 +1417,9 @@ struct ide_port_info {
1415 1417
1416 ide_pci_enablebit_t enablebits[2]; 1418 ide_pci_enablebit_t enablebits[2];
1417 hwif_chipset_t chipset; 1419 hwif_chipset_t chipset;
1420
1421 u16 max_sectors; /* if < than the default one */
1422
1418 u32 host_flags; 1423 u32 host_flags;
1419 u8 pio_mask; 1424 u8 pio_mask;
1420 u8 swdma_mask; 1425 u8 swdma_mask;
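
The max_sectors field replaces per-host request-size flags such as the removed IDE_HFLAG_RQSIZE_256: a host that needs a smaller limit now states it directly. A hedged sketch (the driver name and value are illustrative; the real users are converted elsewhere in this patch):

	static const struct ide_port_info example_port_info = {
		.name		= "example",	/* hypothetical host driver */
		.max_sectors	= 256,		/* applied only if below the default */
	};
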
@@ -1610,13 +1615,13 @@ extern struct mutex ide_cfg_mtx;
1610/* 1615/*
1611 * Structure locking: 1616 * Structure locking:
1612 * 1617 *
1613 * ide_cfg_mtx and ide_lock together protect changes to 1618 * ide_cfg_mtx and hwgroup->lock together protect changes to
1614 * ide_hwif_t->{next,hwgroup} 1619 * ide_hwif_t->next
1615 * ide_drive_t->next 1620 * ide_drive_t->next
1616 * 1621 *
1617 * ide_hwgroup_t->busy: ide_lock 1622 * ide_hwgroup_t->busy: hwgroup->lock
1618 * ide_hwgroup_t->hwif: ide_lock 1623 * ide_hwgroup_t->hwif: hwgroup->lock
1619 * ide_hwif_t->mate: constant, no locking 1624 * ide_hwif_t->{hwgroup,mate}: constant, no locking
1620 * ide_drive_t->hwif: constant, no locking 1625 * ide_drive_t->hwif: constant, no locking
1621 */ 1626 */
1622 1627
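
Put together, the updated locking comment says: fields documented as constant (hwif->hwgroup, hwif->mate, drive->hwif) may be read without any lock, while per-hwgroup state such as busy and the current hwif needs hwgroup->lock. A minimal sketch of an access following those rules (the function is illustrative only):

	static int example_hwgroup_busy(ide_drive_t *drive)
	{
		ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;	/* constant, no lock */
		unsigned long flags;
		int busy;

		spin_lock_irqsave(&hwgroup->lock, flags);	/* protects ->busy */
		busy = hwgroup->busy;
		spin_unlock_irqrestore(&hwgroup->lock, flags);

		return busy;
	}
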