author | Len Brown <len.brown@intel.com> | 2006-01-07 03:50:18 -0500
---|---|---
committer | Len Brown <len.brown@intel.com> | 2006-01-07 03:50:18 -0500
commit | ed03f430cdc8c802652467e9097606fedc2c7abc (patch) |
tree | 30941ec1e6f93e99358fefe18175e5dd800a4379 /drivers/ide |
parent | ed349a8a0a780ed27e2a765f16cee54d9b63bfee (diff) |
parent | 6f957eaf79356a32e838f5f262ee9a60544b1d5b (diff) |
Pull pnpacpi into acpica branch
Diffstat (limited to 'drivers/ide')
-rw-r--r-- | drivers/ide/Kconfig | 10
-rw-r--r-- | drivers/ide/ide-cd.c | 12
-rw-r--r-- | drivers/ide/ide-cd.h | 1
-rw-r--r-- | drivers/ide/ide-disk.c | 142
-rw-r--r-- | drivers/ide/ide-dma.c | 15
-rw-r--r-- | drivers/ide/ide-floppy.c | 1
-rw-r--r-- | drivers/ide/ide-io.c | 11
-rw-r--r-- | drivers/ide/ide-tape.c | 1
-rw-r--r-- | drivers/ide/ide.c | 60
-rw-r--r-- | drivers/ide/legacy/ide-cs.c | 132
-rw-r--r-- | drivers/ide/mips/Makefile | 3
-rw-r--r-- | drivers/ide/mips/au1xxx-ide.c | 1498
-rw-r--r-- | drivers/ide/pci/sgiioc4.c | 8
-rw-r--r-- | drivers/ide/pci/via82cxxx.c | 1
14 files changed, 723 insertions, 1172 deletions
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 31e649a9ff71..1c81174595b3 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -807,14 +807,6 @@ config BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | |||
807 | depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX | 807 | depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX |
808 | endchoice | 808 | endchoice |
809 | 809 | ||
810 | config BLK_DEV_IDE_AU1XXX_BURSTABLE_ON | ||
811 | bool "Enable burstable Mode on DbDMA" | ||
812 | default false | ||
813 | depends BLK_DEV_IDE_AU1XXX | ||
814 | help | ||
815 | This option enable the burstable Flag on DbDMA controller | ||
816 | (cf. "AMD Alchemy 'Au1200' Processor Data Book - PRELIMINARY"). | ||
817 | |||
818 | config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ | 810 | config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ |
819 | int "Maximum transfer size (KB) per request (up to 128)" | 811 | int "Maximum transfer size (KB) per request (up to 128)" |
820 | default "128" | 812 | default "128" |
@@ -940,7 +932,7 @@ config BLK_DEV_Q40IDE | |||
940 | 932 | ||
941 | config BLK_DEV_MPC8xx_IDE | 933 | config BLK_DEV_MPC8xx_IDE |
942 | bool "MPC8xx IDE support" | 934 | bool "MPC8xx IDE support" |
943 | depends on 8xx | 935 | depends on 8xx && IDE=y && BLK_DEV_IDE=y |
944 | help | 936 | help |
945 | This option provides support for IDE on Motorola MPC8xx Systems. | 937 | This option provides support for IDE on Motorola MPC8xx Systems. |
946 | Please see 'Type of MPC8xx IDE interface' for details. | 938 | Please see 'Type of MPC8xx IDE interface' for details. |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 9455e42abb23..d31117eb95aa 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -614,7 +614,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate) | |||
614 | */ | 614 | */ |
615 | spin_lock_irqsave(&ide_lock, flags); | 615 | spin_lock_irqsave(&ide_lock, flags); |
616 | end_that_request_chunk(failed, 0, failed->data_len); | 616 | end_that_request_chunk(failed, 0, failed->data_len); |
617 | end_that_request_last(failed); | 617 | end_that_request_last(failed, 0); |
618 | spin_unlock_irqrestore(&ide_lock, flags); | 618 | spin_unlock_irqrestore(&ide_lock, flags); |
619 | } | 619 | } |
620 | 620 | ||
@@ -1292,7 +1292,6 @@ static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block) | |||
1292 | struct cdrom_info *info = drive->driver_data; | 1292 | struct cdrom_info *info = drive->driver_data; |
1293 | 1293 | ||
1294 | info->dma = 0; | 1294 | info->dma = 0; |
1295 | info->cmd = 0; | ||
1296 | info->start_seek = jiffies; | 1295 | info->start_seek = jiffies; |
1297 | return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation); | 1296 | return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation); |
1298 | } | 1297 | } |
@@ -1344,8 +1343,6 @@ static ide_startstop_t cdrom_start_read (ide_drive_t *drive, unsigned int block) | |||
1344 | (rq->nr_sectors & (sectors_per_frame - 1))) | 1343 | (rq->nr_sectors & (sectors_per_frame - 1))) |
1345 | info->dma = 0; | 1344 | info->dma = 0; |
1346 | 1345 | ||
1347 | info->cmd = READ; | ||
1348 | |||
1349 | /* Start sending the read request to the drive. */ | 1346 | /* Start sending the read request to the drive. */ |
1350 | return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation); | 1347 | return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation); |
1351 | } | 1348 | } |
@@ -1484,7 +1481,6 @@ static ide_startstop_t cdrom_do_packet_command (ide_drive_t *drive) | |||
1484 | struct cdrom_info *info = drive->driver_data; | 1481 | struct cdrom_info *info = drive->driver_data; |
1485 | 1482 | ||
1486 | info->dma = 0; | 1483 | info->dma = 0; |
1487 | info->cmd = 0; | ||
1488 | rq->flags &= ~REQ_FAILED; | 1484 | rq->flags &= ~REQ_FAILED; |
1489 | len = rq->data_len; | 1485 | len = rq->data_len; |
1490 | 1486 | ||
@@ -1739,7 +1735,7 @@ end_request: | |||
1739 | 1735 | ||
1740 | spin_lock_irqsave(&ide_lock, flags); | 1736 | spin_lock_irqsave(&ide_lock, flags); |
1741 | blkdev_dequeue_request(rq); | 1737 | blkdev_dequeue_request(rq); |
1742 | end_that_request_last(rq); | 1738 | end_that_request_last(rq, 1); |
1743 | HWGROUP(drive)->rq = NULL; | 1739 | HWGROUP(drive)->rq = NULL; |
1744 | spin_unlock_irqrestore(&ide_lock, flags); | 1740 | spin_unlock_irqrestore(&ide_lock, flags); |
1745 | return ide_stopped; | 1741 | return ide_stopped; |
@@ -1891,7 +1887,6 @@ static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq) | |||
1891 | /* use dma, if possible. we don't need to check more, since we | 1887 | /* use dma, if possible. we don't need to check more, since we |
1892 | * know that the transfer is always (at least!) frame aligned */ | 1888 | * know that the transfer is always (at least!) frame aligned */ |
1893 | info->dma = drive->using_dma ? 1 : 0; | 1889 | info->dma = drive->using_dma ? 1 : 0; |
1894 | info->cmd = WRITE; | ||
1895 | 1890 | ||
1896 | info->devinfo.media_written = 1; | 1891 | info->devinfo.media_written = 1; |
1897 | 1892 | ||
@@ -1916,7 +1911,6 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) | |||
1916 | rq->flags |= REQ_QUIET; | 1911 | rq->flags |= REQ_QUIET; |
1917 | 1912 | ||
1918 | info->dma = 0; | 1913 | info->dma = 0; |
1919 | info->cmd = 0; | ||
1920 | 1914 | ||
1921 | /* | 1915 | /* |
1922 | * sg request | 1916 | * sg request |
@@ -1925,7 +1919,6 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) | |||
1925 | int mask = drive->queue->dma_alignment; | 1919 | int mask = drive->queue->dma_alignment; |
1926 | unsigned long addr = (unsigned long) page_address(bio_page(rq->bio)); | 1920 | unsigned long addr = (unsigned long) page_address(bio_page(rq->bio)); |
1927 | 1921 | ||
1928 | info->cmd = rq_data_dir(rq); | ||
1929 | info->dma = drive->using_dma; | 1922 | info->dma = drive->using_dma; |
1930 | 1923 | ||
1931 | /* | 1924 | /* |
@@ -3516,6 +3509,7 @@ static int __init ide_cdrom_init(void) | |||
3516 | return driver_register(&ide_cdrom_driver.gen_driver); | 3509 | return driver_register(&ide_cdrom_driver.gen_driver); |
3517 | } | 3510 | } |
3518 | 3511 | ||
3512 | MODULE_ALIAS("ide:*m-cdrom*"); | ||
3519 | module_init(ide_cdrom_init); | 3513 | module_init(ide_cdrom_init); |
3520 | module_exit(ide_cdrom_exit); | 3514 | module_exit(ide_cdrom_exit); |
3521 | MODULE_LICENSE("GPL"); | 3515 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 7ca3e5afc665..ad1f2ed14a37 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -480,7 +480,6 @@ struct cdrom_info { | |||
480 | 480 | ||
481 | struct request request_sense_request; | 481 | struct request request_sense_request; |
482 | int dma; | 482 | int dma; |
483 | int cmd; | ||
484 | unsigned long last_block; | 483 | unsigned long last_block; |
485 | unsigned long start_seek; | 484 | unsigned long start_seek; |
486 | /* Buffer to hold mechanism status and changer slot table. */ | 485 | /* Buffer to hold mechanism status and changer slot table. */ |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f4e3d3527b0e..4b441720b6ba 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = { | |||
681 | 681 | ||
682 | #endif /* CONFIG_PROC_FS */ | 682 | #endif /* CONFIG_PROC_FS */ |
683 | 683 | ||
684 | static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq) | 684 | static void idedisk_prepare_flush(request_queue_t *q, struct request *rq) |
685 | { | 685 | { |
686 | ide_drive_t *drive = q->queuedata; | 686 | ide_drive_t *drive = q->queuedata; |
687 | struct request *rq = flush_rq->end_io_data; | ||
688 | int good_sectors = rq->hard_nr_sectors; | ||
689 | int bad_sectors; | ||
690 | sector_t sector; | ||
691 | |||
692 | if (flush_rq->errors & ABRT_ERR) { | ||
693 | printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name); | ||
694 | blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE); | ||
695 | blk_queue_issue_flush_fn(drive->queue, NULL); | ||
696 | good_sectors = 0; | ||
697 | } else if (flush_rq->errors) { | ||
698 | good_sectors = 0; | ||
699 | if (blk_barrier_preflush(rq)) { | ||
700 | sector = ide_get_error_location(drive,flush_rq->buffer); | ||
701 | if ((sector >= rq->hard_sector) && | ||
702 | (sector < rq->hard_sector + rq->hard_nr_sectors)) | ||
703 | good_sectors = sector - rq->hard_sector; | ||
704 | } | ||
705 | } | ||
706 | |||
707 | if (flush_rq->errors) | ||
708 | printk(KERN_ERR "%s: failed barrier write: " | ||
709 | "sector=%Lx(good=%d/bad=%d)\n", | ||
710 | drive->name, (unsigned long long)rq->sector, | ||
711 | good_sectors, | ||
712 | (int) (rq->hard_nr_sectors-good_sectors)); | ||
713 | |||
714 | bad_sectors = rq->hard_nr_sectors - good_sectors; | ||
715 | |||
716 | if (good_sectors) | ||
717 | __ide_end_request(drive, rq, 1, good_sectors); | ||
718 | if (bad_sectors) | ||
719 | __ide_end_request(drive, rq, 0, bad_sectors); | ||
720 | } | ||
721 | |||
722 | static int idedisk_prepare_flush(request_queue_t *q, struct request *rq) | ||
723 | { | ||
724 | ide_drive_t *drive = q->queuedata; | ||
725 | |||
726 | if (!drive->wcache) | ||
727 | return 0; | ||
728 | 687 | ||
729 | memset(rq->cmd, 0, sizeof(rq->cmd)); | 688 | memset(rq->cmd, 0, sizeof(rq->cmd)); |
730 | 689 | ||
@@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq) | |||
735 | rq->cmd[0] = WIN_FLUSH_CACHE; | 694 | rq->cmd[0] = WIN_FLUSH_CACHE; |
736 | 695 | ||
737 | 696 | ||
738 | rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER; | 697 | rq->flags |= REQ_DRIVE_TASK; |
739 | rq->buffer = rq->cmd; | 698 | rq->buffer = rq->cmd; |
740 | return 1; | ||
741 | } | 699 | } |
742 | 700 | ||
743 | static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk, | 701 | static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk, |
@@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg) | |||
794 | return 0; | 752 | return 0; |
795 | } | 753 | } |
796 | 754 | ||
755 | static void update_ordered(ide_drive_t *drive) | ||
756 | { | ||
757 | struct hd_driveid *id = drive->id; | ||
758 | unsigned ordered = QUEUE_ORDERED_NONE; | ||
759 | prepare_flush_fn *prep_fn = NULL; | ||
760 | issue_flush_fn *issue_fn = NULL; | ||
761 | |||
762 | if (drive->wcache) { | ||
763 | unsigned long long capacity; | ||
764 | int barrier; | ||
765 | /* | ||
766 | * We must avoid issuing commands a drive does not | ||
767 | * understand or we may crash it. We check flush cache | ||
768 | * is supported. We also check we have the LBA48 flush | ||
769 | * cache if the drive capacity is too large. By this | ||
770 | * time we have trimmed the drive capacity if LBA48 is | ||
771 | * not available so we don't need to recheck that. | ||
772 | */ | ||
773 | capacity = idedisk_capacity(drive); | ||
774 | barrier = ide_id_has_flush_cache(id) && | ||
775 | (drive->addressing == 0 || capacity <= (1ULL << 28) || | ||
776 | ide_id_has_flush_cache_ext(id)); | ||
777 | |||
778 | printk(KERN_INFO "%s: cache flushes %ssupported\n", | ||
779 | drive->name, barrier ? "" : "not"); | ||
780 | |||
781 | if (barrier) { | ||
782 | ordered = QUEUE_ORDERED_DRAIN_FLUSH; | ||
783 | prep_fn = idedisk_prepare_flush; | ||
784 | issue_fn = idedisk_issue_flush; | ||
785 | } | ||
786 | } else | ||
787 | ordered = QUEUE_ORDERED_DRAIN; | ||
788 | |||
789 | blk_queue_ordered(drive->queue, ordered, prep_fn); | ||
790 | blk_queue_issue_flush_fn(drive->queue, issue_fn); | ||
791 | } | ||
792 | |||
797 | static int write_cache(ide_drive_t *drive, int arg) | 793 | static int write_cache(ide_drive_t *drive, int arg) |
798 | { | 794 | { |
799 | ide_task_t args; | 795 | ide_task_t args; |
800 | int err; | 796 | int err = 1; |
801 | |||
802 | if (!ide_id_has_flush_cache(drive->id)) | ||
803 | return 1; | ||
804 | 797 | ||
805 | memset(&args, 0, sizeof(ide_task_t)); | 798 | if (ide_id_has_flush_cache(drive->id)) { |
806 | args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? | 799 | memset(&args, 0, sizeof(ide_task_t)); |
800 | args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? | ||
807 | SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; | 801 | SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; |
808 | args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; | 802 | args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; |
809 | args.command_type = IDE_DRIVE_TASK_NO_DATA; | 803 | args.command_type = IDE_DRIVE_TASK_NO_DATA; |
810 | args.handler = &task_no_data_intr; | 804 | args.handler = &task_no_data_intr; |
805 | err = ide_raw_taskfile(drive, &args, NULL); | ||
806 | if (err == 0) | ||
807 | drive->wcache = arg; | ||
808 | } | ||
811 | 809 | ||
812 | err = ide_raw_taskfile(drive, &args, NULL); | 810 | update_ordered(drive); |
813 | if (err) | ||
814 | return err; | ||
815 | 811 | ||
816 | drive->wcache = arg; | 812 | return err; |
817 | return 0; | ||
818 | } | 813 | } |
819 | 814 | ||
820 | static int do_idedisk_flushcache (ide_drive_t *drive) | 815 | static int do_idedisk_flushcache (ide_drive_t *drive) |
@@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive) | |||
888 | { | 883 | { |
889 | struct hd_driveid *id = drive->id; | 884 | struct hd_driveid *id = drive->id; |
890 | unsigned long long capacity; | 885 | unsigned long long capacity; |
891 | int barrier; | ||
892 | 886 | ||
893 | idedisk_add_settings(drive); | 887 | idedisk_add_settings(drive); |
894 | 888 | ||
@@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive) | |||
992 | drive->wcache = 1; | 986 | drive->wcache = 1; |
993 | 987 | ||
994 | write_cache(drive, 1); | 988 | write_cache(drive, 1); |
995 | |||
996 | /* | ||
997 | * We must avoid issuing commands a drive does not understand | ||
998 | * or we may crash it. We check flush cache is supported. We also | ||
999 | * check we have the LBA48 flush cache if the drive capacity is | ||
1000 | * too large. By this time we have trimmed the drive capacity if | ||
1001 | * LBA48 is not available so we don't need to recheck that. | ||
1002 | */ | ||
1003 | barrier = 0; | ||
1004 | if (ide_id_has_flush_cache(id)) | ||
1005 | barrier = 1; | ||
1006 | if (drive->addressing == 1) { | ||
1007 | /* Can't issue the correct flush ? */ | ||
1008 | if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id)) | ||
1009 | barrier = 0; | ||
1010 | } | ||
1011 | |||
1012 | printk(KERN_INFO "%s: cache flushes %ssupported\n", | ||
1013 | drive->name, barrier ? "" : "not "); | ||
1014 | if (barrier) { | ||
1015 | blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH); | ||
1016 | drive->queue->prepare_flush_fn = idedisk_prepare_flush; | ||
1017 | drive->queue->end_flush_fn = idedisk_end_flush; | ||
1018 | blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush); | ||
1019 | } | ||
1020 | } | 989 | } |
1021 | 990 | ||
1022 | static void ide_cacheflush_p(ide_drive_t *drive) | 991 | static void ide_cacheflush_p(ide_drive_t *drive) |
@@ -1034,12 +1003,12 @@ static int ide_disk_remove(struct device *dev) | |||
1034 | struct ide_disk_obj *idkp = drive->driver_data; | 1003 | struct ide_disk_obj *idkp = drive->driver_data; |
1035 | struct gendisk *g = idkp->disk; | 1004 | struct gendisk *g = idkp->disk; |
1036 | 1005 | ||
1037 | ide_cacheflush_p(drive); | ||
1038 | |||
1039 | ide_unregister_subdriver(drive, idkp->driver); | 1006 | ide_unregister_subdriver(drive, idkp->driver); |
1040 | 1007 | ||
1041 | del_gendisk(g); | 1008 | del_gendisk(g); |
1042 | 1009 | ||
1010 | ide_cacheflush_p(drive); | ||
1011 | |||
1043 | ide_disk_put(idkp); | 1012 | ide_disk_put(idkp); |
1044 | 1013 | ||
1045 | return 0; | 1014 | return 0; |
@@ -1271,6 +1240,7 @@ static int __init idedisk_init(void) | |||
1271 | return driver_register(&idedisk_driver.gen_driver); | 1240 | return driver_register(&idedisk_driver.gen_driver); |
1272 | } | 1241 | } |
1273 | 1242 | ||
1243 | MODULE_ALIAS("ide:*m-disk*"); | ||
1274 | module_init(idedisk_init); | 1244 | module_init(idedisk_init); |
1275 | module_exit(idedisk_exit); | 1245 | module_exit(idedisk_exit); |
1276 | MODULE_LICENSE("GPL"); | 1246 | MODULE_LICENSE("GPL"); |
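The ide-disk.c hunk above folds the old end_flush/prepare_flush pair into a single update_ordered() helper that picks the barrier mode from the write-cache state. A condensed sketch of that decision, using the names from the patch (the wrapper function itself is made up, and flush_ok stands in for the FLUSH CACHE / LBA48 capability check):

```c
/* Condensed sketch of the update_ordered() decision -- illustration only. */
static void example_update_ordered(ide_drive_t *drive, int flush_ok)
{
	unsigned ordered = QUEUE_ORDERED_NONE;
	prepare_flush_fn *prep_fn = NULL;
	issue_flush_fn *issue_fn = NULL;

	if (!drive->wcache) {
		/* no write cache: draining the queue is enough */
		ordered = QUEUE_ORDERED_DRAIN;
	} else if (flush_ok) {
		/* write cache plus a usable FLUSH CACHE (EXT): drain, then flush */
		ordered = QUEUE_ORDERED_DRAIN_FLUSH;
		prep_fn = idedisk_prepare_flush;
		issue_fn = idedisk_issue_flush;
	}
	/* write cache on but no usable flush command: stays QUEUE_ORDERED_NONE */

	blk_queue_ordered(drive->queue, ordered, prep_fn);
	blk_queue_issue_flush_fn(drive->queue, issue_fn);
}
```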
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 1e1531334c25..0523da77425a 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -90,11 +90,6 @@ | |||
90 | #include <asm/io.h> | 90 | #include <asm/io.h> |
91 | #include <asm/irq.h> | 91 | #include <asm/irq.h> |
92 | 92 | ||
93 | struct drive_list_entry { | ||
94 | const char *id_model; | ||
95 | const char *id_firmware; | ||
96 | }; | ||
97 | |||
98 | static const struct drive_list_entry drive_whitelist [] = { | 93 | static const struct drive_list_entry drive_whitelist [] = { |
99 | 94 | ||
100 | { "Micropolis 2112A" , "ALL" }, | 95 | { "Micropolis 2112A" , "ALL" }, |
@@ -139,7 +134,7 @@ static const struct drive_list_entry drive_blacklist [] = { | |||
139 | }; | 134 | }; |
140 | 135 | ||
141 | /** | 136 | /** |
142 | * in_drive_list - look for drive in black/white list | 137 | * ide_in_drive_list - look for drive in black/white list |
143 | * @id: drive identifier | 138 | * @id: drive identifier |
144 | * @drive_table: list to inspect | 139 | * @drive_table: list to inspect |
145 | * | 140 | * |
@@ -147,7 +142,7 @@ static const struct drive_list_entry drive_blacklist [] = { | |||
147 | * Returns 1 if the drive is found in the table. | 142 | * Returns 1 if the drive is found in the table. |
148 | */ | 143 | */ |
149 | 144 | ||
150 | static int in_drive_list(struct hd_driveid *id, const struct drive_list_entry *drive_table) | 145 | int ide_in_drive_list(struct hd_driveid *id, const struct drive_list_entry *drive_table) |
151 | { | 146 | { |
152 | for ( ; drive_table->id_model ; drive_table++) | 147 | for ( ; drive_table->id_model ; drive_table++) |
153 | if ((!strcmp(drive_table->id_model, id->model)) && | 148 | if ((!strcmp(drive_table->id_model, id->model)) && |
@@ -157,6 +152,8 @@ static int in_drive_list(struct hd_driveid *id, const struct drive_list_entry *d | |||
157 | return 0; | 152 | return 0; |
158 | } | 153 | } |
159 | 154 | ||
155 | EXPORT_SYMBOL_GPL(ide_in_drive_list); | ||
156 | |||
160 | /** | 157 | /** |
161 | * ide_dma_intr - IDE DMA interrupt handler | 158 | * ide_dma_intr - IDE DMA interrupt handler |
162 | * @drive: the drive the interrupt is for | 159 | * @drive: the drive the interrupt is for |
@@ -663,7 +660,7 @@ int __ide_dma_bad_drive (ide_drive_t *drive) | |||
663 | { | 660 | { |
664 | struct hd_driveid *id = drive->id; | 661 | struct hd_driveid *id = drive->id; |
665 | 662 | ||
666 | int blacklist = in_drive_list(id, drive_blacklist); | 663 | int blacklist = ide_in_drive_list(id, drive_blacklist); |
667 | if (blacklist) { | 664 | if (blacklist) { |
668 | printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n", | 665 | printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n", |
669 | drive->name, id->model); | 666 | drive->name, id->model); |
@@ -677,7 +674,7 @@ EXPORT_SYMBOL(__ide_dma_bad_drive); | |||
677 | int __ide_dma_good_drive (ide_drive_t *drive) | 674 | int __ide_dma_good_drive (ide_drive_t *drive) |
678 | { | 675 | { |
679 | struct hd_driveid *id = drive->id; | 676 | struct hd_driveid *id = drive->id; |
680 | return in_drive_list(id, drive_whitelist); | 677 | return ide_in_drive_list(id, drive_whitelist); |
681 | } | 678 | } |
682 | 679 | ||
683 | EXPORT_SYMBOL(__ide_dma_good_drive); | 680 | EXPORT_SYMBOL(__ide_dma_good_drive); |
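With ide_in_drive_list() exported here (and the local struct drive_list_entry definition dropped, presumably in favour of a shared declaration), host drivers can reuse the common black/white-list matcher instead of carrying private copies, as au1xxx-ide.c does further down in this diff. A hypothetical caller, for illustration only:

```c
/* Hypothetical user of the exported matcher; the table entry is made up.
 * "ALL" in the firmware field matches any firmware revision, and a NULL
 * id_model terminates the table, as in the lists above. */
static const struct drive_list_entry my_quirk_list[] = {
	{ "SOME DRIVE MODEL", "ALL" },
	{ NULL, NULL }
};

static int my_drive_needs_quirk(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, my_quirk_list);
}
```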
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 9e293c8063dc..fba3fffc2d66 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -2197,6 +2197,7 @@ static int __init idefloppy_init(void) | |||
2197 | return driver_register(&idefloppy_driver.gen_driver); | 2197 | return driver_register(&idefloppy_driver.gen_driver); |
2198 | } | 2198 | } |
2199 | 2199 | ||
2200 | MODULE_ALIAS("ide:*m-floppy*"); | ||
2200 | module_init(idefloppy_init); | 2201 | module_init(idefloppy_init); |
2201 | module_exit(idefloppy_exit); | 2202 | module_exit(idefloppy_exit); |
2202 | MODULE_LICENSE("GPL"); | 2203 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index ecfafcdafea4..b5dc6df8e67d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -89,7 +89,7 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate, | |||
89 | 89 | ||
90 | blkdev_dequeue_request(rq); | 90 | blkdev_dequeue_request(rq); |
91 | HWGROUP(drive)->rq = NULL; | 91 | HWGROUP(drive)->rq = NULL; |
92 | end_that_request_last(rq); | 92 | end_that_request_last(rq, uptodate); |
93 | ret = 0; | 93 | ret = 0; |
94 | } | 94 | } |
95 | return ret; | 95 | return ret; |
@@ -119,10 +119,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) | |||
119 | if (!nr_sectors) | 119 | if (!nr_sectors) |
120 | nr_sectors = rq->hard_cur_sectors; | 120 | nr_sectors = rq->hard_cur_sectors; |
121 | 121 | ||
122 | if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors)) | 122 | ret = __ide_end_request(drive, rq, uptodate, nr_sectors); |
123 | ret = rq->nr_sectors != 0; | ||
124 | else | ||
125 | ret = __ide_end_request(drive, rq, uptodate, nr_sectors); | ||
126 | 123 | ||
127 | spin_unlock_irqrestore(&ide_lock, flags); | 124 | spin_unlock_irqrestore(&ide_lock, flags); |
128 | return ret; | 125 | return ret; |
@@ -247,7 +244,7 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) | |||
247 | } | 244 | } |
248 | blkdev_dequeue_request(rq); | 245 | blkdev_dequeue_request(rq); |
249 | HWGROUP(drive)->rq = NULL; | 246 | HWGROUP(drive)->rq = NULL; |
250 | end_that_request_last(rq); | 247 | end_that_request_last(rq, 1); |
251 | spin_unlock_irqrestore(&ide_lock, flags); | 248 | spin_unlock_irqrestore(&ide_lock, flags); |
252 | } | 249 | } |
253 | 250 | ||
@@ -379,7 +376,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) | |||
379 | blkdev_dequeue_request(rq); | 376 | blkdev_dequeue_request(rq); |
380 | HWGROUP(drive)->rq = NULL; | 377 | HWGROUP(drive)->rq = NULL; |
381 | rq->errors = err; | 378 | rq->errors = err; |
382 | end_that_request_last(rq); | 379 | end_that_request_last(rq, !rq->errors); |
383 | spin_unlock_irqrestore(&ide_lock, flags); | 380 | spin_unlock_irqrestore(&ide_lock, flags); |
384 | } | 381 | } |
385 | 382 | ||
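A pattern running through the ide-cd.c and ide-io.c hunks: end_that_request_last() now takes an explicit uptodate argument rather than the caller signalling success some other way. A minimal completion path in the new style, assembled from the calls shown in this patch (the function name is made up):

```c
/* Minimal sketch of request completion with the two-argument
 * end_that_request_last(); 1 means success, 0 means failure. */
static void example_finish_request(ide_drive_t *drive, struct request *rq,
				   int uptodate)
{
	unsigned long flags;

	spin_lock_irqsave(&ide_lock, flags);
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, uptodate);
	spin_unlock_irqrestore(&ide_lock, flags);
}
```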
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 7d7944ed4158..fab9b2b02504 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -4947,6 +4947,7 @@ out: | |||
4947 | return error; | 4947 | return error; |
4948 | } | 4948 | } |
4949 | 4949 | ||
4950 | MODULE_ALIAS("ide:*m-tape*"); | ||
4950 | module_init(idetape_init); | 4951 | module_init(idetape_init); |
4951 | module_exit(idetape_exit); | 4952 | module_exit(idetape_exit); |
4952 | MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR); | 4953 | MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR); |
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 8af179b531c3..4b524f6b3ecd 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1904,9 +1904,69 @@ static int ide_bus_match(struct device *dev, struct device_driver *drv) | |||
1904 | return 1; | 1904 | return 1; |
1905 | } | 1905 | } |
1906 | 1906 | ||
1907 | static char *media_string(ide_drive_t *drive) | ||
1908 | { | ||
1909 | switch (drive->media) { | ||
1910 | case ide_disk: | ||
1911 | return "disk"; | ||
1912 | case ide_cdrom: | ||
1913 | return "cdrom"; | ||
1914 | case ide_tape: | ||
1915 | return "tape"; | ||
1916 | case ide_floppy: | ||
1917 | return "floppy"; | ||
1918 | default: | ||
1919 | return "UNKNOWN"; | ||
1920 | } | ||
1921 | } | ||
1922 | |||
1923 | static ssize_t media_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1924 | { | ||
1925 | ide_drive_t *drive = to_ide_device(dev); | ||
1926 | return sprintf(buf, "%s\n", media_string(drive)); | ||
1927 | } | ||
1928 | |||
1929 | static ssize_t drivename_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1930 | { | ||
1931 | ide_drive_t *drive = to_ide_device(dev); | ||
1932 | return sprintf(buf, "%s\n", drive->name); | ||
1933 | } | ||
1934 | |||
1935 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
1936 | { | ||
1937 | ide_drive_t *drive = to_ide_device(dev); | ||
1938 | return sprintf(buf, "ide:m-%s\n", media_string(drive)); | ||
1939 | } | ||
1940 | |||
1941 | static struct device_attribute ide_dev_attrs[] = { | ||
1942 | __ATTR_RO(media), | ||
1943 | __ATTR_RO(drivename), | ||
1944 | __ATTR_RO(modalias), | ||
1945 | __ATTR_NULL | ||
1946 | }; | ||
1947 | |||
1948 | static int ide_uevent(struct device *dev, char **envp, int num_envp, | ||
1949 | char *buffer, int buffer_size) | ||
1950 | { | ||
1951 | ide_drive_t *drive = to_ide_device(dev); | ||
1952 | int i = 0; | ||
1953 | int length = 0; | ||
1954 | |||
1955 | add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, | ||
1956 | "MEDIA=%s", media_string(drive)); | ||
1957 | add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, | ||
1958 | "DRIVENAME=%s", drive->name); | ||
1959 | add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, | ||
1960 | "MODALIAS=ide:m-%s", media_string(drive)); | ||
1961 | envp[i] = NULL; | ||
1962 | return 0; | ||
1963 | } | ||
1964 | |||
1907 | struct bus_type ide_bus_type = { | 1965 | struct bus_type ide_bus_type = { |
1908 | .name = "ide", | 1966 | .name = "ide", |
1909 | .match = ide_bus_match, | 1967 | .match = ide_bus_match, |
1968 | .uevent = ide_uevent, | ||
1969 | .dev_attrs = ide_dev_attrs, | ||
1910 | .suspend = generic_ide_suspend, | 1970 | .suspend = generic_ide_suspend, |
1911 | .resume = generic_ide_resume, | 1971 | .resume = generic_ide_resume, |
1912 | }; | 1972 | }; |
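The ide.c hunk above makes the bus emit MODALIAS=ide:m-<media> (and a matching sysfs modalias attribute), which is what the new MODULE_ALIAS("ide:*m-...*") lines in ide-cd, ide-disk, ide-floppy and ide-tape are matched against; modprobe resolves the alias with shell-style globbing in userspace, roughly like this hypothetical helper (not kernel code from the patch):

```c
#include <fnmatch.h>

/* Rough userspace illustration: pattern "ide:*m-disk*" matches the
 * uevent string "ide:m-disk", so the right driver module gets loaded. */
static int alias_matches(const char *pattern, const char *modalias)
{
	return fnmatch(pattern, modalias, 0) == 0;
}
```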
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index ef79805218e4..4c2af9020905 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -88,15 +88,12 @@ typedef struct ide_info_t { | |||
88 | } ide_info_t; | 88 | } ide_info_t; |
89 | 89 | ||
90 | static void ide_release(dev_link_t *); | 90 | static void ide_release(dev_link_t *); |
91 | static int ide_event(event_t event, int priority, | 91 | static void ide_config(dev_link_t *); |
92 | event_callback_args_t *args); | 92 | |
93 | static void ide_detach(struct pcmcia_device *p_dev); | ||
93 | 94 | ||
94 | static dev_info_t dev_info = "ide-cs"; | ||
95 | 95 | ||
96 | static dev_link_t *ide_attach(void); | ||
97 | static void ide_detach(dev_link_t *); | ||
98 | 96 | ||
99 | static dev_link_t *dev_list = NULL; | ||
100 | 97 | ||
101 | /*====================================================================== | 98 | /*====================================================================== |
102 | 99 | ||
@@ -106,18 +103,17 @@ static dev_link_t *dev_list = NULL; | |||
106 | 103 | ||
107 | ======================================================================*/ | 104 | ======================================================================*/ |
108 | 105 | ||
109 | static dev_link_t *ide_attach(void) | 106 | static int ide_attach(struct pcmcia_device *p_dev) |
110 | { | 107 | { |
111 | ide_info_t *info; | 108 | ide_info_t *info; |
112 | dev_link_t *link; | 109 | dev_link_t *link; |
113 | client_reg_t client_reg; | 110 | |
114 | int ret; | ||
115 | |||
116 | DEBUG(0, "ide_attach()\n"); | 111 | DEBUG(0, "ide_attach()\n"); |
117 | 112 | ||
118 | /* Create new ide device */ | 113 | /* Create new ide device */ |
119 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 114 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
120 | if (!info) return NULL; | 115 | if (!info) |
116 | return -ENOMEM; | ||
121 | link = &info->link; link->priv = info; | 117 | link = &info->link; link->priv = info; |
122 | 118 | ||
123 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 119 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
@@ -128,21 +124,14 @@ static dev_link_t *ide_attach(void) | |||
128 | link->conf.Attributes = CONF_ENABLE_IRQ; | 124 | link->conf.Attributes = CONF_ENABLE_IRQ; |
129 | link->conf.Vcc = 50; | 125 | link->conf.Vcc = 50; |
130 | link->conf.IntType = INT_MEMORY_AND_IO; | 126 | link->conf.IntType = INT_MEMORY_AND_IO; |
131 | 127 | ||
132 | /* Register with Card Services */ | 128 | link->handle = p_dev; |
133 | link->next = dev_list; | 129 | p_dev->instance = link; |
134 | dev_list = link; | 130 | |
135 | client_reg.dev_info = &dev_info; | 131 | link->state |= DEV_PRESENT | DEV_CONFIG_PENDING; |
136 | client_reg.Version = 0x0210; | 132 | ide_config(link); |
137 | client_reg.event_callback_args.client_data = link; | 133 | |
138 | ret = pcmcia_register_client(&link->handle, &client_reg); | 134 | return 0; |
139 | if (ret != CS_SUCCESS) { | ||
140 | cs_error(link->handle, RegisterClient, ret); | ||
141 | ide_detach(link); | ||
142 | return NULL; | ||
143 | } | ||
144 | |||
145 | return link; | ||
146 | } /* ide_attach */ | 135 | } /* ide_attach */ |
147 | 136 | ||
148 | /*====================================================================== | 137 | /*====================================================================== |
@@ -154,32 +143,16 @@ static dev_link_t *ide_attach(void) | |||
154 | 143 | ||
155 | ======================================================================*/ | 144 | ======================================================================*/ |
156 | 145 | ||
157 | static void ide_detach(dev_link_t *link) | 146 | static void ide_detach(struct pcmcia_device *p_dev) |
158 | { | 147 | { |
159 | dev_link_t **linkp; | 148 | dev_link_t *link = dev_to_instance(p_dev); |
160 | int ret; | ||
161 | 149 | ||
162 | DEBUG(0, "ide_detach(0x%p)\n", link); | 150 | DEBUG(0, "ide_detach(0x%p)\n", link); |
163 | |||
164 | /* Locate device structure */ | ||
165 | for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next) | ||
166 | if (*linkp == link) break; | ||
167 | if (*linkp == NULL) | ||
168 | return; | ||
169 | 151 | ||
170 | if (link->state & DEV_CONFIG) | 152 | if (link->state & DEV_CONFIG) |
171 | ide_release(link); | 153 | ide_release(link); |
172 | 154 | ||
173 | if (link->handle) { | ||
174 | ret = pcmcia_deregister_client(link->handle); | ||
175 | if (ret != CS_SUCCESS) | ||
176 | cs_error(link->handle, DeregisterClient, ret); | ||
177 | } | ||
178 | |||
179 | /* Unlink, free device structure */ | ||
180 | *linkp = link->next; | ||
181 | kfree(link->priv); | 155 | kfree(link->priv); |
182 | |||
183 | } /* ide_detach */ | 156 | } /* ide_detach */ |
184 | 157 | ||
185 | static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle) | 158 | static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle) |
@@ -406,6 +379,28 @@ void ide_release(dev_link_t *link) | |||
406 | 379 | ||
407 | } /* ide_release */ | 380 | } /* ide_release */ |
408 | 381 | ||
382 | static int ide_suspend(struct pcmcia_device *dev) | ||
383 | { | ||
384 | dev_link_t *link = dev_to_instance(dev); | ||
385 | |||
386 | link->state |= DEV_SUSPEND; | ||
387 | if (link->state & DEV_CONFIG) | ||
388 | pcmcia_release_configuration(link->handle); | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static int ide_resume(struct pcmcia_device *dev) | ||
394 | { | ||
395 | dev_link_t *link = dev_to_instance(dev); | ||
396 | |||
397 | link->state &= ~DEV_SUSPEND; | ||
398 | if (DEV_OK(link)) | ||
399 | pcmcia_request_configuration(link->handle, &link->conf); | ||
400 | |||
401 | return 0; | ||
402 | } | ||
403 | |||
409 | /*====================================================================== | 404 | /*====================================================================== |
410 | 405 | ||
411 | The card status event handler. Mostly, this schedules other | 406 | The card status event handler. Mostly, this schedules other |
@@ -415,48 +410,15 @@ void ide_release(dev_link_t *link) | |||
415 | 410 | ||
416 | ======================================================================*/ | 411 | ======================================================================*/ |
417 | 412 | ||
418 | int ide_event(event_t event, int priority, | ||
419 | event_callback_args_t *args) | ||
420 | { | ||
421 | dev_link_t *link = args->client_data; | ||
422 | |||
423 | DEBUG(1, "ide_event(0x%06x)\n", event); | ||
424 | |||
425 | switch (event) { | ||
426 | case CS_EVENT_CARD_REMOVAL: | ||
427 | link->state &= ~DEV_PRESENT; | ||
428 | if (link->state & DEV_CONFIG) | ||
429 | ide_release(link); | ||
430 | break; | ||
431 | case CS_EVENT_CARD_INSERTION: | ||
432 | link->state |= DEV_PRESENT | DEV_CONFIG_PENDING; | ||
433 | ide_config(link); | ||
434 | break; | ||
435 | case CS_EVENT_PM_SUSPEND: | ||
436 | link->state |= DEV_SUSPEND; | ||
437 | /* Fall through... */ | ||
438 | case CS_EVENT_RESET_PHYSICAL: | ||
439 | if (link->state & DEV_CONFIG) | ||
440 | pcmcia_release_configuration(link->handle); | ||
441 | break; | ||
442 | case CS_EVENT_PM_RESUME: | ||
443 | link->state &= ~DEV_SUSPEND; | ||
444 | /* Fall through... */ | ||
445 | case CS_EVENT_CARD_RESET: | ||
446 | if (DEV_OK(link)) | ||
447 | pcmcia_request_configuration(link->handle, &link->conf); | ||
448 | break; | ||
449 | } | ||
450 | return 0; | ||
451 | } /* ide_event */ | ||
452 | |||
453 | static struct pcmcia_device_id ide_ids[] = { | 413 | static struct pcmcia_device_id ide_ids[] = { |
454 | PCMCIA_DEVICE_FUNC_ID(4), | 414 | PCMCIA_DEVICE_FUNC_ID(4), |
415 | PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */ | ||
455 | PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), | 416 | PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), |
456 | PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), | 417 | PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), |
457 | PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ | 418 | PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ |
458 | PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), | 419 | PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), |
459 | PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ | 420 | PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ |
421 | PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ | ||
460 | PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), | 422 | PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), |
461 | PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */ | 423 | PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */ |
462 | PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), | 424 | PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0), |
@@ -471,6 +433,8 @@ static struct pcmcia_device_id ide_ids[] = { | |||
471 | PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591), | 433 | PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591), |
472 | PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728), | 434 | PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728), |
473 | PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), | 435 | PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), |
436 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), | ||
437 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), | ||
474 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), | 438 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), |
475 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), | 439 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), |
476 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), | 440 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), |
@@ -494,10 +458,11 @@ static struct pcmcia_driver ide_cs_driver = { | |||
494 | .drv = { | 458 | .drv = { |
495 | .name = "ide-cs", | 459 | .name = "ide-cs", |
496 | }, | 460 | }, |
497 | .attach = ide_attach, | 461 | .probe = ide_attach, |
498 | .event = ide_event, | 462 | .remove = ide_detach, |
499 | .detach = ide_detach, | ||
500 | .id_table = ide_ids, | 463 | .id_table = ide_ids, |
464 | .suspend = ide_suspend, | ||
465 | .resume = ide_resume, | ||
501 | }; | 466 | }; |
502 | 467 | ||
503 | static int __init init_ide_cs(void) | 468 | static int __init init_ide_cs(void) |
@@ -508,7 +473,6 @@ static int __init init_ide_cs(void) | |||
508 | static void __exit exit_ide_cs(void) | 473 | static void __exit exit_ide_cs(void) |
509 | { | 474 | { |
510 | pcmcia_unregister_driver(&ide_cs_driver); | 475 | pcmcia_unregister_driver(&ide_cs_driver); |
511 | BUG_ON(dev_list != NULL); | ||
512 | } | 476 | } |
513 | 477 | ||
514 | late_initcall(init_ide_cs); | 478 | late_initcall(init_ide_cs); |
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile
index 578e52a59588..677c7b2bac92 100644
--- a/drivers/ide/mips/Makefile
+++ b/drivers/ide/mips/Makefile
@@ -1 +1,4 @@ | |||
1 | obj-$(CONFIG_BLK_DEV_IDE_SWARM) += swarm.o | 1 | obj-$(CONFIG_BLK_DEV_IDE_SWARM) += swarm.o |
2 | obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o | ||
3 | |||
4 | EXTRA_CFLAGS := -Idrivers/ide | ||
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 2b6327c576b9..32431dcf5d8e 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -31,865 +31,638 @@ | |||
31 | */ | 31 | */ |
32 | #undef REALLY_SLOW_IO /* most systems can safely undef this */ | 32 | #undef REALLY_SLOW_IO /* most systems can safely undef this */ |
33 | 33 | ||
34 | #include <linux/config.h> /* for CONFIG_BLK_DEV_IDEPCI */ | ||
35 | #include <linux/types.h> | 34 | #include <linux/types.h> |
36 | #include <linux/module.h> | 35 | #include <linux/module.h> |
37 | #include <linux/kernel.h> | 36 | #include <linux/kernel.h> |
38 | #include <linux/delay.h> | 37 | #include <linux/delay.h> |
39 | #include <linux/timer.h> | 38 | #include <linux/platform_device.h> |
40 | #include <linux/mm.h> | 39 | |
41 | #include <linux/ioport.h> | ||
42 | #include <linux/hdreg.h> | ||
43 | #include <linux/init.h> | 40 | #include <linux/init.h> |
44 | #include <linux/ide.h> | 41 | #include <linux/ide.h> |
45 | #include <linux/sysdev.h> | 42 | #include <linux/sysdev.h> |
46 | 43 | ||
47 | #include <linux/dma-mapping.h> | 44 | #include <linux/dma-mapping.h> |
48 | 45 | ||
46 | #include "ide-timing.h" | ||
47 | |||
49 | #include <asm/io.h> | 48 | #include <asm/io.h> |
50 | #include <asm/mach-au1x00/au1xxx.h> | 49 | #include <asm/mach-au1x00/au1xxx.h> |
51 | #include <asm/mach-au1x00/au1xxx_dbdma.h> | 50 | #include <asm/mach-au1x00/au1xxx_dbdma.h> |
52 | 51 | ||
53 | #if CONFIG_PM | ||
54 | #include <asm/mach-au1x00/au1xxx_pm.h> | ||
55 | #endif | ||
56 | |||
57 | #include <asm/mach-au1x00/au1xxx_ide.h> | 52 | #include <asm/mach-au1x00/au1xxx_ide.h> |
58 | 53 | ||
59 | #define DRV_NAME "au1200-ide" | 54 | #define DRV_NAME "au1200-ide" |
60 | #define DRV_VERSION "1.0" | 55 | #define DRV_VERSION "1.0" |
61 | #define DRV_AUTHOR "AMD PCS / Pete Popov <ppopov@embeddedalley.com>" | 56 | #define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>" |
62 | #define DRV_DESC "Au1200 IDE" | ||
63 | |||
64 | static _auide_hwif auide_hwif; | ||
65 | static spinlock_t ide_tune_drive_spin_lock = SPIN_LOCK_UNLOCKED; | ||
66 | static spinlock_t ide_tune_chipset_spin_lock = SPIN_LOCK_UNLOCKED; | ||
67 | static int dbdma_init_done = 0; | ||
68 | |||
69 | /* | ||
70 | * local I/O functions | ||
71 | */ | ||
72 | u8 auide_inb(unsigned long port) | ||
73 | { | ||
74 | return (au_readb(port)); | ||
75 | } | ||
76 | 57 | ||
77 | u16 auide_inw(unsigned long port) | 58 | /* enable the burstmode in the dbdma */ |
78 | { | 59 | #define IDE_AU1XXX_BURSTMODE 1 |
79 | return (au_readw(port)); | ||
80 | } | ||
81 | 60 | ||
82 | u32 auide_inl(unsigned long port) | 61 | static _auide_hwif auide_hwif; |
83 | { | 62 | static int dbdma_init_done; |
84 | return (au_readl(port)); | ||
85 | } | ||
86 | 63 | ||
87 | void auide_insw(unsigned long port, void *addr, u32 count) | ||
88 | { | ||
89 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) | 64 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) |
90 | 65 | ||
91 | _auide_hwif *ahwif = &auide_hwif; | 66 | void auide_insw(unsigned long port, void *addr, u32 count) |
92 | chan_tab_t *ctp; | ||
93 | au1x_ddma_desc_t *dp; | ||
94 | |||
95 | if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, | ||
96 | DDMA_FLAGS_NOIE)) { | ||
97 | printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); | ||
98 | return; | ||
99 | } | ||
100 | ctp = *((chan_tab_t **)ahwif->rx_chan); | ||
101 | dp = ctp->cur_ptr; | ||
102 | while (dp->dscr_cmd0 & DSCR_CMD0_V) | ||
103 | ; | ||
104 | ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); | ||
105 | #else | ||
106 | while (count--) | ||
107 | { | ||
108 | *(u16 *)addr = au_readw(port); | ||
109 | addr +=2 ; | ||
110 | } | ||
111 | #endif | ||
112 | } | ||
113 | |||
114 | void auide_insl(unsigned long port, void *addr, u32 count) | ||
115 | { | ||
116 | while (count--) | ||
117 | { | ||
118 | *(u32 *)addr = au_readl(port); | ||
119 | /* NOTE: For IDE interfaces over PCMCIA, | ||
120 | * 32-bit access does not work | ||
121 | */ | ||
122 | addr += 4; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | void auide_outb(u8 addr, unsigned long port) | ||
127 | { | 67 | { |
128 | return (au_writeb(addr, port)); | 68 | _auide_hwif *ahwif = &auide_hwif; |
129 | } | 69 | chan_tab_t *ctp; |
70 | au1x_ddma_desc_t *dp; | ||
130 | 71 | ||
131 | void auide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port) | 72 | if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, |
132 | { | 73 | DDMA_FLAGS_NOIE)) { |
133 | return (au_writeb(addr, port)); | 74 | printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); |
75 | return; | ||
76 | } | ||
77 | ctp = *((chan_tab_t **)ahwif->rx_chan); | ||
78 | dp = ctp->cur_ptr; | ||
79 | while (dp->dscr_cmd0 & DSCR_CMD0_V) | ||
80 | ; | ||
81 | ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); | ||
134 | } | 82 | } |
135 | 83 | ||
136 | void auide_outw(u16 addr, unsigned long port) | 84 | void auide_outsw(unsigned long port, void *addr, u32 count) |
137 | { | 85 | { |
138 | return (au_writew(addr, port)); | 86 | _auide_hwif *ahwif = &auide_hwif; |
139 | } | 87 | chan_tab_t *ctp; |
88 | au1x_ddma_desc_t *dp; | ||
140 | 89 | ||
141 | void auide_outl(u32 addr, unsigned long port) | 90 | if(!put_source_flags(ahwif->tx_chan, (void*)addr, |
142 | { | 91 | count << 1, DDMA_FLAGS_NOIE)) { |
143 | return (au_writel(addr, port)); | 92 | printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); |
93 | return; | ||
94 | } | ||
95 | ctp = *((chan_tab_t **)ahwif->tx_chan); | ||
96 | dp = ctp->cur_ptr; | ||
97 | while (dp->dscr_cmd0 & DSCR_CMD0_V) | ||
98 | ; | ||
99 | ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); | ||
144 | } | 100 | } |
145 | 101 | ||
146 | void auide_outsw(unsigned long port, void *addr, u32 count) | ||
147 | { | ||
148 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) | ||
149 | _auide_hwif *ahwif = &auide_hwif; | ||
150 | chan_tab_t *ctp; | ||
151 | au1x_ddma_desc_t *dp; | ||
152 | |||
153 | if(!put_source_flags(ahwif->tx_chan, (void*)addr, | ||
154 | count << 1, DDMA_FLAGS_NOIE)) { | ||
155 | printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__); | ||
156 | return; | ||
157 | } | ||
158 | ctp = *((chan_tab_t **)ahwif->tx_chan); | ||
159 | dp = ctp->cur_ptr; | ||
160 | while (dp->dscr_cmd0 & DSCR_CMD0_V) | ||
161 | ; | ||
162 | ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp); | ||
163 | #else | ||
164 | while (count--) | ||
165 | { | ||
166 | au_writew(*(u16 *)addr, port); | ||
167 | addr += 2; | ||
168 | } | ||
169 | #endif | 102 | #endif |
170 | } | ||
171 | |||
172 | void auide_outsl(unsigned long port, void *addr, u32 count) | ||
173 | { | ||
174 | while (count--) | ||
175 | { | ||
176 | au_writel(*(u32 *)addr, port); | ||
177 | /* NOTE: For IDE interfaces over PCMCIA, | ||
178 | * 32-bit access does not work | ||
179 | */ | ||
180 | addr += 4; | ||
181 | } | ||
182 | } | ||
183 | 103 | ||
184 | static void auide_tune_drive(ide_drive_t *drive, byte pio) | 104 | static void auide_tune_drive(ide_drive_t *drive, byte pio) |
185 | { | 105 | { |
186 | int mem_sttime; | 106 | int mem_sttime; |
187 | int mem_stcfg; | 107 | int mem_stcfg; |
188 | unsigned long flags; | 108 | u8 speed; |
189 | u8 speed; | 109 | |
190 | 110 | /* get the best pio mode for the drive */ | |
191 | /* get the best pio mode for the drive */ | 111 | pio = ide_get_best_pio_mode(drive, pio, 4, NULL); |
192 | pio = ide_get_best_pio_mode(drive, pio, 4, NULL); | 112 | |
193 | 113 | printk(KERN_INFO "%s: setting Au1XXX IDE to PIO mode%d\n", | |
194 | printk("%s: setting Au1XXX IDE to PIO mode%d\n", | 114 | drive->name, pio); |
195 | drive->name, pio); | 115 | |
196 | 116 | mem_sttime = 0; | |
197 | spin_lock_irqsave(&ide_tune_drive_spin_lock, flags); | 117 | mem_stcfg = au_readl(MEM_STCFG2); |
198 | 118 | ||
199 | mem_sttime = 0; | 119 | /* set pio mode! */ |
200 | mem_stcfg = au_readl(MEM_STCFG2); | 120 | switch(pio) { |
201 | 121 | case 0: | |
202 | /* set pio mode! */ | 122 | mem_sttime = SBC_IDE_TIMING(PIO0); |
203 | switch(pio) { | 123 | |
204 | case 0: | 124 | /* set configuration for RCS2# */ |
205 | /* set timing parameters for RCS2# */ | 125 | mem_stcfg |= TS_MASK; |
206 | mem_sttime = SBC_IDE_PIO0_TWCS | 126 | mem_stcfg &= ~TCSOE_MASK; |
207 | | SBC_IDE_PIO0_TCSH | 127 | mem_stcfg &= ~TOECS_MASK; |
208 | | SBC_IDE_PIO0_TCSOFF | 128 | mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS; |
209 | | SBC_IDE_PIO0_TWP | 129 | break; |
210 | | SBC_IDE_PIO0_TCSW | 130 | |
211 | | SBC_IDE_PIO0_TPM | 131 | case 1: |
212 | | SBC_IDE_PIO0_TA; | 132 | mem_sttime = SBC_IDE_TIMING(PIO1); |
213 | /* set configuration for RCS2# */ | 133 | |
214 | mem_stcfg |= TS_MASK; | 134 | /* set configuration for RCS2# */ |
215 | mem_stcfg &= ~TCSOE_MASK; | 135 | mem_stcfg |= TS_MASK; |
216 | mem_stcfg &= ~TOECS_MASK; | 136 | mem_stcfg &= ~TCSOE_MASK; |
217 | mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS; | 137 | mem_stcfg &= ~TOECS_MASK; |
218 | 138 | mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS; | |
219 | au_writel(mem_sttime,MEM_STTIME2); | 139 | break; |
220 | au_writel(mem_stcfg,MEM_STCFG2); | 140 | |
221 | break; | 141 | case 2: |
222 | 142 | mem_sttime = SBC_IDE_TIMING(PIO2); | |
223 | case 1: | 143 | |
224 | /* set timing parameters for RCS2# */ | 144 | /* set configuration for RCS2# */ |
225 | mem_sttime = SBC_IDE_PIO1_TWCS | 145 | mem_stcfg &= ~TS_MASK; |
226 | | SBC_IDE_PIO1_TCSH | 146 | mem_stcfg &= ~TCSOE_MASK; |
227 | | SBC_IDE_PIO1_TCSOFF | 147 | mem_stcfg &= ~TOECS_MASK; |
228 | | SBC_IDE_PIO1_TWP | 148 | mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS; |
229 | | SBC_IDE_PIO1_TCSW | 149 | break; |
230 | | SBC_IDE_PIO1_TPM | 150 | |
231 | | SBC_IDE_PIO1_TA; | 151 | case 3: |
232 | /* set configuration for RCS2# */ | 152 | mem_sttime = SBC_IDE_TIMING(PIO3); |
233 | mem_stcfg |= TS_MASK; | 153 | |
234 | mem_stcfg &= ~TCSOE_MASK; | 154 | /* set configuration for RCS2# */ |
235 | mem_stcfg &= ~TOECS_MASK; | 155 | mem_stcfg &= ~TS_MASK; |
236 | mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS; | 156 | mem_stcfg &= ~TCSOE_MASK; |
237 | break; | 157 | mem_stcfg &= ~TOECS_MASK; |
238 | 158 | mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS; | |
239 | case 2: | 159 | |
240 | /* set timing parameters for RCS2# */ | 160 | break; |
241 | mem_sttime = SBC_IDE_PIO2_TWCS | 161 | |
242 | | SBC_IDE_PIO2_TCSH | 162 | case 4: |
243 | | SBC_IDE_PIO2_TCSOFF | 163 | mem_sttime = SBC_IDE_TIMING(PIO4); |
244 | | SBC_IDE_PIO2_TWP | 164 | |
245 | | SBC_IDE_PIO2_TCSW | 165 | /* set configuration for RCS2# */ |
246 | | SBC_IDE_PIO2_TPM | 166 | mem_stcfg &= ~TS_MASK; |
247 | | SBC_IDE_PIO2_TA; | 167 | mem_stcfg &= ~TCSOE_MASK; |
248 | /* set configuration for RCS2# */ | 168 | mem_stcfg &= ~TOECS_MASK; |
249 | mem_stcfg &= ~TS_MASK; | 169 | mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS; |
250 | mem_stcfg &= ~TCSOE_MASK; | 170 | break; |
251 | mem_stcfg &= ~TOECS_MASK; | 171 | } |
252 | mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS; | 172 | |
253 | break; | 173 | au_writel(mem_sttime,MEM_STTIME2); |
254 | 174 | au_writel(mem_stcfg,MEM_STCFG2); | |
255 | case 3: | 175 | |
256 | /* set timing parameters for RCS2# */ | 176 | speed = pio + XFER_PIO_0; |
257 | mem_sttime = SBC_IDE_PIO3_TWCS | 177 | ide_config_drive_speed(drive, speed); |
258 | | SBC_IDE_PIO3_TCSH | ||
259 | | SBC_IDE_PIO3_TCSOFF | ||
260 | | SBC_IDE_PIO3_TWP | ||
261 | | SBC_IDE_PIO3_TCSW | ||
262 | | SBC_IDE_PIO3_TPM | ||
263 | | SBC_IDE_PIO3_TA; | ||
264 | /* set configuration for RCS2# */ | ||
265 | mem_stcfg |= TS_MASK; | ||
266 | mem_stcfg &= ~TS_MASK; | ||
267 | mem_stcfg &= ~TCSOE_MASK; | ||
268 | mem_stcfg &= ~TOECS_MASK; | ||
269 | mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS; | ||
270 | |||
271 | break; | ||
272 | |||
273 | case 4: | ||
274 | /* set timing parameters for RCS2# */ | ||
275 | mem_sttime = SBC_IDE_PIO4_TWCS | ||
276 | | SBC_IDE_PIO4_TCSH | ||
277 | | SBC_IDE_PIO4_TCSOFF | ||
278 | | SBC_IDE_PIO4_TWP | ||
279 | | SBC_IDE_PIO4_TCSW | ||
280 | | SBC_IDE_PIO4_TPM | ||
281 | | SBC_IDE_PIO4_TA; | ||
282 | /* set configuration for RCS2# */ | ||
283 | mem_stcfg &= ~TS_MASK; | ||
284 | mem_stcfg &= ~TCSOE_MASK; | ||
285 | mem_stcfg &= ~TOECS_MASK; | ||
286 | mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS; | ||
287 | break; | ||
288 | } | ||
289 | |||
290 | au_writel(mem_sttime,MEM_STTIME2); | ||
291 | au_writel(mem_stcfg,MEM_STCFG2); | ||
292 | |||
293 | spin_unlock_irqrestore(&ide_tune_drive_spin_lock, flags); | ||
294 | |||
295 | speed = pio + XFER_PIO_0; | ||
296 | ide_config_drive_speed(drive, speed); | ||
297 | } | 178 | } |
298 | 179 | ||
299 | static int auide_tune_chipset (ide_drive_t *drive, u8 speed) | 180 | static int auide_tune_chipset (ide_drive_t *drive, u8 speed) |
300 | { | 181 | { |
301 | u8 mode = 0; | 182 | int mem_sttime; |
302 | int mem_sttime; | 183 | int mem_stcfg; |
303 | int mem_stcfg; | 184 | unsigned long mode; |
304 | unsigned long flags; | 185 | |
305 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 186 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
306 | struct hd_driveid *id = drive->id; | 187 | if (ide_use_dma(drive)) |
307 | 188 | mode = ide_dma_speed(drive, 0); | |
308 | /* | ||
309 | * Now see what the current drive is capable of, | ||
310 | * selecting UDMA only if the mate said it was ok. | ||
311 | */ | ||
312 | if (id && (id->capability & 1) && drive->autodma && | ||
313 | !__ide_dma_bad_drive(drive)) { | ||
314 | if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) { | ||
315 | if (id->dma_mword & 4) | ||
316 | mode = XFER_MW_DMA_2; | ||
317 | else if (id->dma_mword & 2) | ||
318 | mode = XFER_MW_DMA_1; | ||
319 | else if (id->dma_mword & 1) | ||
320 | mode = XFER_MW_DMA_0; | ||
321 | } | ||
322 | } | ||
323 | #endif | 189 | #endif |
324 | 190 | ||
325 | spin_lock_irqsave(&ide_tune_chipset_spin_lock, flags); | 191 | mem_sttime = 0; |
192 | mem_stcfg = au_readl(MEM_STCFG2); | ||
326 | 193 | ||
327 | mem_sttime = 0; | 194 | if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) { |
328 | mem_stcfg = au_readl(MEM_STCFG2); | 195 | auide_tune_drive(drive, speed - XFER_PIO_0); |
329 | 196 | return 0; | |
330 | switch(speed) { | 197 | } |
331 | case XFER_PIO_4: | 198 | |
332 | case XFER_PIO_3: | 199 | switch(speed) { |
333 | case XFER_PIO_2: | ||
334 | case XFER_PIO_1: | ||
335 | case XFER_PIO_0: | ||
336 | auide_tune_drive(drive, (speed - XFER_PIO_0)); | ||
337 | break; | ||
338 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 200 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
339 | case XFER_MW_DMA_2: | 201 | case XFER_MW_DMA_2: |
340 | /* set timing parameters for RCS2# */ | 202 | mem_sttime = SBC_IDE_TIMING(MDMA2); |
341 | mem_sttime = SBC_IDE_MDMA2_TWCS | 203 | |
342 | | SBC_IDE_MDMA2_TCSH | 204 | /* set configuration for RCS2# */ |
343 | | SBC_IDE_MDMA2_TCSOFF | 205 | mem_stcfg &= ~TS_MASK; |
344 | | SBC_IDE_MDMA2_TWP | 206 | mem_stcfg &= ~TCSOE_MASK; |
345 | | SBC_IDE_MDMA2_TCSW | 207 | mem_stcfg &= ~TOECS_MASK; |
346 | | SBC_IDE_MDMA2_TPM | 208 | mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS; |
347 | | SBC_IDE_MDMA2_TA; | 209 | |
348 | /* set configuration for RCS2# */ | 210 | mode = XFER_MW_DMA_2; |
349 | mem_stcfg &= ~TS_MASK; | 211 | break; |
350 | mem_stcfg &= ~TCSOE_MASK; | 212 | case XFER_MW_DMA_1: |
351 | mem_stcfg &= ~TOECS_MASK; | 213 | mem_sttime = SBC_IDE_TIMING(MDMA1); |
352 | mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS; | 214 | |
353 | 215 | /* set configuration for RCS2# */ | |
354 | mode = XFER_MW_DMA_2; | 216 | mem_stcfg &= ~TS_MASK; |
355 | break; | 217 | mem_stcfg &= ~TCSOE_MASK; |
356 | case XFER_MW_DMA_1: | 218 | mem_stcfg &= ~TOECS_MASK; |
357 | /* set timing parameters for RCS2# */ | 219 | mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS; |
358 | mem_sttime = SBC_IDE_MDMA1_TWCS | 220 | |
359 | | SBC_IDE_MDMA1_TCSH | 221 | mode = XFER_MW_DMA_1; |
360 | | SBC_IDE_MDMA1_TCSOFF | 222 | break; |
361 | | SBC_IDE_MDMA1_TWP | 223 | case XFER_MW_DMA_0: |
362 | | SBC_IDE_MDMA1_TCSW | 224 | mem_sttime = SBC_IDE_TIMING(MDMA0); |
363 | | SBC_IDE_MDMA1_TPM | 225 | |
364 | | SBC_IDE_MDMA1_TA; | 226 | /* set configuration for RCS2# */ |
365 | /* set configuration for RCS2# */ | 227 | mem_stcfg |= TS_MASK; |
366 | mem_stcfg &= ~TS_MASK; | 228 | mem_stcfg &= ~TCSOE_MASK; |
367 | mem_stcfg &= ~TCSOE_MASK; | 229 | mem_stcfg &= ~TOECS_MASK; |
368 | mem_stcfg &= ~TOECS_MASK; | 230 | mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS; |
369 | mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS; | 231 | |
370 | 232 | mode = XFER_MW_DMA_0; | |
371 | mode = XFER_MW_DMA_1; | 233 | break; |
372 | break; | ||
373 | case XFER_MW_DMA_0: | ||
374 | /* set timing parameters for RCS2# */ | ||
375 | mem_sttime = SBC_IDE_MDMA0_TWCS | ||
376 | | SBC_IDE_MDMA0_TCSH | ||
377 | | SBC_IDE_MDMA0_TCSOFF | ||
378 | | SBC_IDE_MDMA0_TWP | ||
379 | | SBC_IDE_MDMA0_TCSW | ||
380 | | SBC_IDE_MDMA0_TPM | ||
381 | | SBC_IDE_MDMA0_TA; | ||
382 | /* set configuration for RCS2# */ | ||
383 | mem_stcfg |= TS_MASK; | ||
384 | mem_stcfg &= ~TCSOE_MASK; | ||
385 | mem_stcfg &= ~TOECS_MASK; | ||
386 | mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS; | ||
387 | |||
388 | mode = XFER_MW_DMA_0; | ||
389 | break; | ||
390 | #endif | 234 | #endif |
391 | default: | 235 | default: |
392 | return 1; | 236 | return 1; |
393 | } | 237 | } |
394 | 238 | ||
395 | /* | 239 | if (ide_config_drive_speed(drive, mode)) |
396 | * Tell the drive to switch to the new mode; abort on failure. | 240 | return 1; |
397 | */ | ||
398 | if (!mode || ide_config_drive_speed(drive, mode)) | ||
399 | { | ||
400 | return 1; /* failure */ | ||
401 | } | ||
402 | |||
403 | |||
404 | au_writel(mem_sttime,MEM_STTIME2); | ||
405 | au_writel(mem_stcfg,MEM_STCFG2); | ||
406 | 241 | ||
407 | spin_unlock_irqrestore(&ide_tune_chipset_spin_lock, flags); | 242 | au_writel(mem_sttime,MEM_STTIME2); |
243 | au_writel(mem_stcfg,MEM_STCFG2); | ||
408 | 244 | ||
409 | return 0; | 245 | return 0; |
410 | } | 246 | } |
411 | 247 | ||
412 | /* | 248 | /* |
413 | * Multi-Word DMA + DbDMA functions | 249 | * Multi-Word DMA + DbDMA functions |
414 | */ | 250 | */ |
415 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | ||
416 | 251 | ||
417 | static int in_drive_list(struct hd_driveid *id, | 252 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
418 | const struct drive_list_entry *drive_table) | ||
419 | { | ||
420 | for ( ; drive_table->id_model ; drive_table++){ | ||
421 | if ((!strcmp(drive_table->id_model, id->model)) && | ||
422 | ((strstr(drive_table->id_firmware, id->fw_rev)) || | ||
423 | (!strcmp(drive_table->id_firmware, "ALL"))) | ||
424 | ) | ||
425 | return 1; | ||
426 | } | ||
427 | return 0; | ||
428 | } | ||
429 | 253 | ||
430 | static int auide_build_sglist(ide_drive_t *drive, struct request *rq) | 254 | static int auide_build_sglist(ide_drive_t *drive, struct request *rq) |
431 | { | 255 | { |
432 | ide_hwif_t *hwif = drive->hwif; | 256 | ide_hwif_t *hwif = drive->hwif; |
433 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; | 257 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; |
434 | struct scatterlist *sg = hwif->sg_table; | 258 | struct scatterlist *sg = hwif->sg_table; |
435 | 259 | ||
436 | ide_map_sg(drive, rq); | 260 | ide_map_sg(drive, rq); |
437 | 261 | ||
438 | if (rq_data_dir(rq) == READ) | 262 | if (rq_data_dir(rq) == READ) |
439 | hwif->sg_dma_direction = DMA_FROM_DEVICE; | 263 | hwif->sg_dma_direction = DMA_FROM_DEVICE; |
440 | else | 264 | else |
441 | hwif->sg_dma_direction = DMA_TO_DEVICE; | 265 | hwif->sg_dma_direction = DMA_TO_DEVICE; |
442 | 266 | ||
443 | return dma_map_sg(ahwif->dev, sg, hwif->sg_nents, | 267 | return dma_map_sg(ahwif->dev, sg, hwif->sg_nents, |
444 | hwif->sg_dma_direction); | 268 | hwif->sg_dma_direction); |
445 | } | 269 | } |
446 | 270 | ||
447 | static int auide_build_dmatable(ide_drive_t *drive) | 271 | static int auide_build_dmatable(ide_drive_t *drive) |
448 | { | 272 | { |
449 | int i, iswrite, count = 0; | 273 | int i, iswrite, count = 0; |
450 | ide_hwif_t *hwif = HWIF(drive); | 274 | ide_hwif_t *hwif = HWIF(drive); |
451 | 275 | ||
452 | struct request *rq = HWGROUP(drive)->rq; | 276 | struct request *rq = HWGROUP(drive)->rq; |
453 | 277 | ||
454 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; | 278 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; |
455 | struct scatterlist *sg; | 279 | struct scatterlist *sg; |
456 | 280 | ||
457 | iswrite = (rq_data_dir(rq) == WRITE); | 281 | iswrite = (rq_data_dir(rq) == WRITE); |
458 | /* Save for interrupt context */ | 282 | /* Save for interrupt context */ |
459 | ahwif->drive = drive; | 283 | ahwif->drive = drive; |
460 | 284 | ||
461 | /* Build sglist */ | 285 | /* Build sglist */ |
462 | hwif->sg_nents = i = auide_build_sglist(drive, rq); | 286 | hwif->sg_nents = i = auide_build_sglist(drive, rq); |
463 | 287 | ||
464 | if (!i) | 288 | if (!i) |
465 | return 0; | 289 | return 0; |
466 | 290 | ||
467 | /* fill the descriptors */ | 291 | /* fill the descriptors */ |
468 | sg = hwif->sg_table; | 292 | sg = hwif->sg_table; |
469 | while (i && sg_dma_len(sg)) { | 293 | while (i && sg_dma_len(sg)) { |
470 | u32 cur_addr; | 294 | u32 cur_addr; |
471 | u32 cur_len; | 295 | u32 cur_len; |
472 | 296 | ||
473 | cur_addr = sg_dma_address(sg); | 297 | cur_addr = sg_dma_address(sg); |
474 | cur_len = sg_dma_len(sg); | 298 | cur_len = sg_dma_len(sg); |
475 | 299 | ||
476 | while (cur_len) { | 300 | while (cur_len) { |
477 | u32 flags = DDMA_FLAGS_NOIE; | 301 | u32 flags = DDMA_FLAGS_NOIE; |
478 | unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00; | 302 | unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00; |
479 | 303 | ||
480 | if (++count >= PRD_ENTRIES) { | 304 | if (++count >= PRD_ENTRIES) { |
481 | printk(KERN_WARNING "%s: DMA table too small\n", | 305 | printk(KERN_WARNING "%s: DMA table too small\n", |
482 | drive->name); | 306 | drive->name); |
483 | goto use_pio_instead; | 307 | goto use_pio_instead; |
484 | } | 308 | } |
485 | 309 | ||
486 | /* Lets enable intr for the last descriptor only */ | 310 | /* Lets enable intr for the last descriptor only */ |
487 | if (1==i) | 311 | if (1==i) |
488 | flags = DDMA_FLAGS_IE; | 312 | flags = DDMA_FLAGS_IE; |
489 | else | 313 | else |
490 | flags = DDMA_FLAGS_NOIE; | 314 | flags = DDMA_FLAGS_NOIE; |
491 | 315 | ||
492 | if (iswrite) { | 316 | if (iswrite) { |
493 | if(!put_source_flags(ahwif->tx_chan, | 317 | if(!put_source_flags(ahwif->tx_chan, |
494 | (void*)(page_address(sg->page) | 318 | (void*)(page_address(sg->page) |
495 | + sg->offset), | 319 | + sg->offset), |
496 | tc, flags)) { | 320 | tc, flags)) { |
497 | printk(KERN_ERR "%s failed %d\n", | 321 | printk(KERN_ERR "%s failed %d\n", |
498 | __FUNCTION__, __LINE__); | 322 | __FUNCTION__, __LINE__); |
499 | } | 323 | } |
500 | } else | 324 | } else |
501 | { | 325 | { |
502 | if(!put_dest_flags(ahwif->rx_chan, | 326 | if(!put_dest_flags(ahwif->rx_chan, |
503 | (void*)(page_address(sg->page) | 327 | (void*)(page_address(sg->page) |
504 | + sg->offset), | 328 | + sg->offset), |
505 | tc, flags)) { | 329 | tc, flags)) { |
506 | printk(KERN_ERR "%s failed %d\n", | 330 | printk(KERN_ERR "%s failed %d\n", |
507 | __FUNCTION__, __LINE__); | 331 | __FUNCTION__, __LINE__); |
508 | } | 332 | } |
509 | } | 333 | } |
510 | 334 | ||
511 | cur_addr += tc; | 335 | cur_addr += tc; |
512 | cur_len -= tc; | 336 | cur_len -= tc; |
513 | } | 337 | } |
514 | sg++; | 338 | sg++; |
515 | i--; | 339 | i--; |
516 | } | 340 | } |
517 | 341 | ||
518 | if (count) | 342 | if (count) |
519 | return 1; | 343 | return 1; |
520 | 344 | ||
521 | use_pio_instead: | 345 | use_pio_instead: |
522 | dma_unmap_sg(ahwif->dev, | 346 | dma_unmap_sg(ahwif->dev, |
523 | hwif->sg_table, | 347 | hwif->sg_table, |
524 | hwif->sg_nents, | 348 | hwif->sg_nents, |
525 | hwif->sg_dma_direction); | 349 | hwif->sg_dma_direction); |
526 | 350 | ||
527 | return 0; /* revert to PIO for this request */ | 351 | return 0; /* revert to PIO for this request */ |
528 | } | 352 | } |
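auide_build_dmatable() above walks the mapped scatterlist, cuts each segment into DBDMA descriptors of at most 0xfe00 bytes, falls back to PIO when the descriptor budget is exceeded, and requests a completion interrupt only while filling the final segment. A self-contained sketch of just that bookkeeping follows (the put_source_flags()/put_dest_flags() channel calls are deliberately left out, and the names below are simplified stand-ins, not the kernel API).

    /* Standalone sketch of the chunking logic in auide_build_dmatable(). */
    #include <stdio.h>

    #define MAX_CHUNK   0xfe00u
    #define PRD_ENTRIES 8          /* small limit, just for the demo */

    enum { FLAG_NOIE, FLAG_IE };

    static int fill_descriptors(const unsigned int *seg_len, int nsegs)
    {
            int count = 0;

            for (int i = nsegs; i > 0; i--, seg_len++) {
                    unsigned int cur_len = *seg_len;

                    while (cur_len) {
                            unsigned int tc = cur_len < MAX_CHUNK ? cur_len : MAX_CHUNK;
                            /* interrupt requested only for the last segment, as in the diff */
                            int flags = (i == 1) ? FLAG_IE : FLAG_NOIE;

                            if (++count >= PRD_ENTRIES) {
                                    printf("DMA table too small, falling back to PIO\n");
                                    return 0;
                            }
                            printf("desc %d: %u bytes, flags=%s\n",
                                   count, tc, flags == FLAG_IE ? "IE" : "NOIE");
                            cur_len -= tc;
                    }
            }
            return count;
    }

    int main(void)
    {
            unsigned int segs[] = { 0x20000, 0x1000 };

            fill_descriptors(segs, 2);
            return 0;
    }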
529 | 353 | ||
530 | static int auide_dma_end(ide_drive_t *drive) | 354 | static int auide_dma_end(ide_drive_t *drive) |
531 | { | 355 | { |
532 | ide_hwif_t *hwif = HWIF(drive); | 356 | ide_hwif_t *hwif = HWIF(drive); |
533 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; | 357 | _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data; |
534 | 358 | ||
535 | if (hwif->sg_nents) { | 359 | if (hwif->sg_nents) { |
536 | dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents, | 360 | dma_unmap_sg(ahwif->dev, hwif->sg_table, hwif->sg_nents, |
537 | hwif->sg_dma_direction); | 361 | hwif->sg_dma_direction); |
538 | hwif->sg_nents = 0; | 362 | hwif->sg_nents = 0; |
539 | } | 363 | } |
540 | 364 | ||
541 | return 0; | 365 | return 0; |
542 | } | 366 | } |
543 | 367 | ||
544 | static void auide_dma_start(ide_drive_t *drive ) | 368 | static void auide_dma_start(ide_drive_t *drive ) |
545 | { | 369 | { |
546 | // printk("%s\n", __FUNCTION__); | ||
547 | } | 370 | } |
548 | 371 | ||
549 | ide_startstop_t auide_dma_intr(ide_drive_t *drive) | ||
550 | { | ||
551 | //printk("%s\n", __FUNCTION__); | ||
552 | |||
553 | u8 stat = 0, dma_stat = 0; | ||
554 | |||
555 | dma_stat = HWIF(drive)->ide_dma_end(drive); | ||
556 | stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */ | ||
557 | if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { | ||
558 | if (!dma_stat) { | ||
559 | struct request *rq = HWGROUP(drive)->rq; | ||
560 | |||
561 | ide_end_request(drive, 1, rq->nr_sectors); | ||
562 | return ide_stopped; | ||
563 | } | ||
564 | printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", | ||
565 | drive->name, dma_stat); | ||
566 | } | ||
567 | return ide_error(drive, "dma_intr", stat); | ||
568 | } | ||
569 | 372 | ||
570 | static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command) | 373 | static void auide_dma_exec_cmd(ide_drive_t *drive, u8 command) |
571 | { | 374 | { |
572 | //printk("%s\n", __FUNCTION__); | 375 | /* issue cmd to drive */ |
573 | 376 | ide_execute_command(drive, command, &ide_dma_intr, | |
574 | /* issue cmd to drive */ | 377 | (2*WAIT_CMD), NULL); |
575 | ide_execute_command(drive, command, &auide_dma_intr, | ||
576 | (2*WAIT_CMD), NULL); | ||
577 | } | 378 | } |
578 | 379 | ||
579 | static int auide_dma_setup(ide_drive_t *drive) | 380 | static int auide_dma_setup(ide_drive_t *drive) |
580 | { | 381 | { |
581 | // printk("%s\n", __FUNCTION__); | 382 | struct request *rq = HWGROUP(drive)->rq; |
582 | |||
583 | if (drive->media != ide_disk) | ||
584 | return 1; | ||
585 | |||
586 | if (!auide_build_dmatable(drive)) | ||
587 | /* try PIO instead of DMA */ | ||
588 | return 1; | ||
589 | 383 | ||
590 | drive->waiting_for_dma = 1; | 384 | if (!auide_build_dmatable(drive)) { |
385 | ide_map_sg(drive, rq); | ||
386 | return 1; | ||
387 | } | ||
591 | 388 | ||
592 | return 0; | 389 | drive->waiting_for_dma = 1; |
390 | return 0; | ||
593 | } | 391 | } |
594 | 392 | ||
595 | static int auide_dma_check(ide_drive_t *drive) | 393 | static int auide_dma_check(ide_drive_t *drive) |
596 | { | 394 | { |
597 | // printk("%s\n", __FUNCTION__); | 395 | u8 speed; |
598 | 396 | ||
599 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 397 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
600 | if( !dbdma_init_done ){ | 398 | |
601 | auide_hwif.white_list = in_drive_list(drive->id, | 399 | if( dbdma_init_done == 0 ){ |
602 | dma_white_list); | 400 | auide_hwif.white_list = ide_in_drive_list(drive->id, |
603 | auide_hwif.black_list = in_drive_list(drive->id, | 401 | dma_white_list); |
604 | dma_black_list); | 402 | auide_hwif.black_list = ide_in_drive_list(drive->id, |
605 | auide_hwif.drive = drive; | 403 | dma_black_list); |
606 | auide_ddma_init(&auide_hwif); | 404 | auide_hwif.drive = drive; |
607 | dbdma_init_done = 1; | 405 | auide_ddma_init(&auide_hwif); |
608 | } | 406 | dbdma_init_done = 1; |
407 | } | ||
609 | #endif | 408 | #endif |
610 | 409 | ||
611 | /* Is the drive in our DMA black list? */ | 410 | /* Is the drive in our DMA black list? */ |
612 | if ( auide_hwif.black_list ) { | 411 | |
613 | drive->using_dma = 0; | 412 | if ( auide_hwif.black_list ) { |
614 | printk("%s found in dma_blacklist[]! Disabling DMA.\n", | 413 | drive->using_dma = 0; |
615 | drive->id->model); | 414 | |
616 | } | 415 | /* Borrowed the warning message from ide-dma.c */ |
617 | else | ||
618 | drive->using_dma = 1; | ||
619 | 416 | ||
620 | return HWIF(drive)->ide_dma_host_on(drive); | 417 | printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n", |
418 | drive->name, drive->id->model); | ||
419 | } | ||
420 | else | ||
421 | drive->using_dma = 1; | ||
422 | |||
423 | speed = ide_find_best_mode(drive, XFER_PIO | XFER_MWDMA); | ||
424 | |||
425 | if (drive->autodma && (speed & XFER_MODE) != XFER_PIO) | ||
426 | return HWIF(drive)->ide_dma_on(drive); | ||
427 | |||
428 | return HWIF(drive)->ide_dma_off_quietly(drive); | ||
621 | } | 429 | } |
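The reworked auide_dma_check() above forces a blacklisted model off DMA, otherwise picks the best PIO/MWDMA mode and only enables DMA when autodma is set and the chosen mode is not a PIO mode. A minimal sketch of that decision follows; the struct, helpers and constants are simplified stand-ins invented for the example, not the ide layer's real API.

    /* Sketch of the blacklist / autodma decision in auide_dma_check(). */
    #include <stdio.h>

    #define XFER_PIO_0    0x08
    #define XFER_MW_DMA_2 0x22

    struct fake_drive {
            int blacklisted;
            int autodma;
            int using_dma;
    };

    static int best_mode(const struct fake_drive *d)
    {
            /* pretend the drive advertises MWDMA2 unless it is blacklisted */
            return d->blacklisted ? XFER_PIO_0 : XFER_MW_DMA_2;
    }

    static const char *dma_check(struct fake_drive *d)
    {
            if (d->blacklisted) {
                    d->using_dma = 0;
                    return "DMA disabled (blacklisted)";
            }
            d->using_dma = 1;

            if (d->autodma && best_mode(d) != XFER_PIO_0)
                    return "DMA on";
            return "DMA off (quietly)";
    }

    int main(void)
    {
            struct fake_drive good = { 0, 1, 0 }, bad = { 1, 1, 0 };

            printf("good drive: %s\n", dma_check(&good));
            printf("bad drive:  %s\n", dma_check(&bad));
            return 0;
    }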
622 | 430 | ||
623 | static int auide_dma_test_irq(ide_drive_t *drive) | 431 | static int auide_dma_test_irq(ide_drive_t *drive) |
624 | { | 432 | { |
625 | // printk("%s\n", __FUNCTION__); | 433 | if (drive->waiting_for_dma == 0) |
626 | 434 | printk(KERN_WARNING "%s: ide_dma_test_irq \ | |
627 | if (!drive->waiting_for_dma) | ||
628 | printk(KERN_WARNING "%s: ide_dma_test_irq \ | ||
629 | called while not waiting\n", drive->name); | 435 | called while not waiting\n", drive->name); |
630 | 436 | ||
631 | /* If dbdma didn't execute the STOP command yet, the | 437 | /* If dbdma didn't execute the STOP command yet, the |
632 | * active bit is still set | 438 | * active bit is still set |
633 | */ | 439 | */ |
634 | drive->waiting_for_dma++; | 440 | drive->waiting_for_dma++; |
635 | if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) { | 441 | if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) { |
636 | printk(KERN_WARNING "%s: timeout waiting for ddma to \ | 442 | printk(KERN_WARNING "%s: timeout waiting for ddma to \ |
637 | complete\n", drive->name); | 443 | complete\n", drive->name); |
638 | return 1; | 444 | return 1; |
639 | } | 445 | } |
640 | udelay(10); | 446 | udelay(10); |
641 | return 0; | 447 | return 0; |
642 | } | 448 | } |
643 | 449 | ||
644 | static int auide_dma_host_on(ide_drive_t *drive) | 450 | static int auide_dma_host_on(ide_drive_t *drive) |
645 | { | 451 | { |
646 | // printk("%s\n", __FUNCTION__); | 452 | return 0; |
647 | return 0; | ||
648 | } | 453 | } |
649 | 454 | ||
650 | static int auide_dma_on(ide_drive_t *drive) | 455 | static int auide_dma_on(ide_drive_t *drive) |
651 | { | 456 | { |
652 | // printk("%s\n", __FUNCTION__); | 457 | drive->using_dma = 1; |
653 | drive->using_dma = 1; | 458 | return auide_dma_host_on(drive); |
654 | return auide_dma_host_on(drive); | ||
655 | } | 459 | } |
656 | 460 | ||
657 | 461 | ||
658 | static int auide_dma_host_off(ide_drive_t *drive) | 462 | static int auide_dma_host_off(ide_drive_t *drive) |
659 | { | 463 | { |
660 | // printk("%s\n", __FUNCTION__); | 464 | return 0; |
661 | return 0; | ||
662 | } | 465 | } |
663 | 466 | ||
664 | static int auide_dma_off_quietly(ide_drive_t *drive) | 467 | static int auide_dma_off_quietly(ide_drive_t *drive) |
665 | { | 468 | { |
666 | // printk("%s\n", __FUNCTION__); | 469 | drive->using_dma = 0; |
667 | drive->using_dma = 0; | 470 | return auide_dma_host_off(drive); |
668 | return auide_dma_host_off(drive); | ||
669 | } | 471 | } |
670 | 472 | ||
671 | static int auide_dma_lostirq(ide_drive_t *drive) | 473 | static int auide_dma_lostirq(ide_drive_t *drive) |
672 | { | 474 | { |
673 | // printk("%s\n", __FUNCTION__); | 475 | printk(KERN_ERR "%s: IRQ lost\n", drive->name); |
674 | 476 | return 0; | |
675 | printk(KERN_ERR "%s: IRQ lost\n", drive->name); | ||
676 | return 0; | ||
677 | } | 477 | } |
678 | 478 | ||
679 | static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs) | 479 | static void auide_ddma_tx_callback(int irq, void *param, struct pt_regs *regs) |
680 | { | 480 | { |
681 | // printk("%s\n", __FUNCTION__); | 481 | _auide_hwif *ahwif = (_auide_hwif*)param; |
682 | 482 | ahwif->drive->waiting_for_dma = 0; | |
683 | _auide_hwif *ahwif = (_auide_hwif*)param; | ||
684 | ahwif->drive->waiting_for_dma = 0; | ||
685 | return; | ||
686 | } | 483 | } |
687 | 484 | ||
688 | static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs) | 485 | static void auide_ddma_rx_callback(int irq, void *param, struct pt_regs *regs) |
689 | { | 486 | { |
690 | // printk("%s\n", __FUNCTION__); | 487 | _auide_hwif *ahwif = (_auide_hwif*)param; |
488 | ahwif->drive->waiting_for_dma = 0; | ||
489 | } | ||
490 | |||
491 | #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ | ||
691 | 492 | ||
692 | _auide_hwif *ahwif = (_auide_hwif*)param; | 493 | static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags) |
693 | ahwif->drive->waiting_for_dma = 0; | 494 | { |
694 | return; | 495 | dev->dev_id = dev_id; |
496 | dev->dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
497 | dev->dev_intlevel = 0; | ||
498 | dev->dev_intpolarity = 0; | ||
499 | dev->dev_tsize = tsize; | ||
500 | dev->dev_devwidth = devwidth; | ||
501 | dev->dev_flags = flags; | ||
695 | } | 502 | } |
503 | |||
504 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | ||
696 | 505 | ||
697 | static int auide_dma_timeout(ide_drive_t *drive) | 506 | static int auide_dma_timeout(ide_drive_t *drive) |
698 | { | 507 | { |
699 | // printk("%s\n", __FUNCTION__); | 508 | // printk("%s\n", __FUNCTION__); |
700 | 509 | ||
701 | printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); | 510 | printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); |
702 | 511 | ||
703 | if (HWIF(drive)->ide_dma_test_irq(drive)) | 512 | if (HWIF(drive)->ide_dma_test_irq(drive)) |
704 | return 0; | 513 | return 0; |
705 | 514 | ||
706 | return HWIF(drive)->ide_dma_end(drive); | 515 | return HWIF(drive)->ide_dma_end(drive); |
707 | } | 516 | } |
708 | #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ | 517 | |
709 | 518 | ||
519 | static int auide_ddma_init(_auide_hwif *auide) { | ||
520 | |||
521 | dbdev_tab_t source_dev_tab, target_dev_tab; | ||
522 | u32 dev_id, tsize, devwidth, flags; | ||
523 | ide_hwif_t *hwif = auide->hwif; | ||
710 | 524 | ||
711 | static int auide_ddma_init( _auide_hwif *auide ) | 525 | dev_id = AU1XXX_ATA_DDMA_REQ; |
712 | { | ||
713 | // printk("%s\n", __FUNCTION__); | ||
714 | 526 | ||
715 | dbdev_tab_t source_dev_tab; | 527 | if (auide->white_list || auide->black_list) { |
716 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 528 | tsize = 8; |
717 | dbdev_tab_t target_dev_tab; | 529 | devwidth = 32; |
718 | ide_hwif_t *hwif = auide->hwif; | 530 | } |
719 | char warning_output [2][80]; | 531 | else { |
720 | int i; | 532 | tsize = 1; |
721 | #endif | 533 | devwidth = 16; |
534 | |||
535 | printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model); | ||
536 | printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'"); | ||
537 | } | ||
722 | 538 | ||
723 | /* Add our custom device to DDMA device table */ | 539 | #ifdef IDE_AU1XXX_BURSTMODE |
724 | /* Create our new device entries in the table */ | 540 | flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE; |
725 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | ||
726 | source_dev_tab.dev_id = AU1XXX_ATA_DDMA_REQ; | ||
727 | |||
728 | if( auide->white_list || auide->black_list ){ | ||
729 | source_dev_tab.dev_tsize = 8; | ||
730 | source_dev_tab.dev_devwidth = 32; | ||
731 | source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
732 | source_dev_tab.dev_intlevel = 0; | ||
733 | source_dev_tab.dev_intpolarity = 0; | ||
734 | |||
735 | /* init device table for target - static bus controller - */ | ||
736 | target_dev_tab.dev_id = DSCR_CMD0_ALWAYS; | ||
737 | target_dev_tab.dev_tsize = 8; | ||
738 | target_dev_tab.dev_devwidth = 32; | ||
739 | target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
740 | target_dev_tab.dev_intlevel = 0; | ||
741 | target_dev_tab.dev_intpolarity = 0; | ||
742 | target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE; | ||
743 | } | ||
744 | else{ | ||
745 | source_dev_tab.dev_tsize = 1; | ||
746 | source_dev_tab.dev_devwidth = 16; | ||
747 | source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
748 | source_dev_tab.dev_intlevel = 0; | ||
749 | source_dev_tab.dev_intpolarity = 0; | ||
750 | |||
751 | /* init device table for target - static bus controller - */ | ||
752 | target_dev_tab.dev_id = DSCR_CMD0_ALWAYS; | ||
753 | target_dev_tab.dev_tsize = 1; | ||
754 | target_dev_tab.dev_devwidth = 16; | ||
755 | target_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
756 | target_dev_tab.dev_intlevel = 0; | ||
757 | target_dev_tab.dev_intpolarity = 0; | ||
758 | target_dev_tab.dev_flags = DEV_FLAGS_ANYUSE; | ||
759 | |||
760 | sprintf(&warning_output[0][0], | ||
761 | "%s is not on ide driver white list.", | ||
762 | auide_hwif.drive->id->model); | ||
763 | for ( i=strlen(&warning_output[0][0]) ; i<76; i++ ){ | ||
764 | sprintf(&warning_output[0][i]," "); | ||
765 | } | ||
766 | |||
767 | sprintf(&warning_output[1][0], | ||
768 | "To add %s please read 'Documentation/mips/AU1xxx_IDE.README'.", | ||
769 | auide_hwif.drive->id->model); | ||
770 | for ( i=strlen(&warning_output[1][0]) ; i<76; i++ ){ | ||
771 | sprintf(&warning_output[1][i]," "); | ||
772 | } | ||
773 | |||
774 | printk("\n****************************************"); | ||
775 | printk("****************************************\n"); | ||
776 | printk("* %s *\n",&warning_output[0][0]); | ||
777 | printk("* Switch to safe MWDMA Mode! "); | ||
778 | printk(" *\n"); | ||
779 | printk("* %s *\n",&warning_output[1][0]); | ||
780 | printk("****************************************"); | ||
781 | printk("****************************************\n\n"); | ||
782 | } | ||
783 | #else | 541 | #else |
784 | source_dev_tab.dev_id = DSCR_CMD0_ALWAYS; | 542 | flags = DEV_FLAGS_SYNC; |
785 | source_dev_tab.dev_tsize = 8; | ||
786 | source_dev_tab.dev_devwidth = 32; | ||
787 | source_dev_tab.dev_physaddr = (u32)AU1XXX_ATA_PHYS_ADDR; | ||
788 | source_dev_tab.dev_intlevel = 0; | ||
789 | source_dev_tab.dev_intpolarity = 0; | ||
790 | #endif | 543 | #endif |
791 | 544 | ||
792 | #if CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON | 545 | /* setup dev_tab for tx channel */ |
793 | /* set flags for tx channel */ | 546 | auide_init_dbdma_dev( &source_dev_tab, |
794 | source_dev_tab.dev_flags = DEV_FLAGS_OUT | 547 | dev_id, |
795 | | DEV_FLAGS_SYNC | 548 | tsize, devwidth, DEV_FLAGS_OUT | flags); |
796 | | DEV_FLAGS_BURSTABLE; | 549 | auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); |
797 | auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | 550 | |
798 | /* set flags for rx channel */ | 551 | auide_init_dbdma_dev( &source_dev_tab, |
799 | source_dev_tab.dev_flags = DEV_FLAGS_IN | 552 | dev_id, |
800 | | DEV_FLAGS_SYNC | 553 | tsize, devwidth, DEV_FLAGS_IN | flags); |
801 | | DEV_FLAGS_BURSTABLE; | 554 | auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); |
802 | auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | 555 | |
556 | /* We also need to add a target device for the DMA */ | ||
557 | auide_init_dbdma_dev( &target_dev_tab, | ||
558 | (u32)DSCR_CMD0_ALWAYS, | ||
559 | tsize, devwidth, DEV_FLAGS_ANYUSE); | ||
560 | auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab); | ||
561 | |||
562 | /* Get a channel for TX */ | ||
563 | auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id, | ||
564 | auide->tx_dev_id, | ||
565 | auide_ddma_tx_callback, | ||
566 | (void*)auide); | ||
567 | |||
568 | /* Get a channel for RX */ | ||
569 | auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id, | ||
570 | auide->target_dev_id, | ||
571 | auide_ddma_rx_callback, | ||
572 | (void*)auide); | ||
573 | |||
574 | auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan, | ||
575 | NUM_DESCRIPTORS); | ||
576 | auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan, | ||
577 | NUM_DESCRIPTORS); | ||
578 | |||
579 | hwif->dmatable_cpu = dma_alloc_coherent(auide->dev, | ||
580 | PRD_ENTRIES * PRD_BYTES, /* 1 Page */ | ||
581 | &hwif->dmatable_dma, GFP_KERNEL); | ||
582 | |||
583 | au1xxx_dbdma_start( auide->tx_chan ); | ||
584 | au1xxx_dbdma_start( auide->rx_chan ); | ||
585 | |||
586 | return 0; | ||
587 | } | ||
803 | #else | 588 | #else |
804 | /* set flags for tx channel */ | 589 | |
805 | source_dev_tab.dev_flags = DEV_FLAGS_OUT | DEV_FLAGS_SYNC; | 590 | static int auide_ddma_init( _auide_hwif *auide ) |
806 | auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | 591 | { |
807 | /* set flags for rx channel */ | 592 | dbdev_tab_t source_dev_tab; |
808 | source_dev_tab.dev_flags = DEV_FLAGS_IN | DEV_FLAGS_SYNC; | 593 | int flags; |
809 | auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | ||
810 | #endif | ||
811 | 594 | ||
812 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 595 | #ifdef IDE_AU1XXX_BURSTMODE |
813 | 596 | flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE; | |
814 | auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab); | 597 | #else |
815 | 598 | flags = DEV_FLAGS_SYNC; | |
816 | /* Get a channel for TX */ | ||
817 | auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id, | ||
818 | auide->tx_dev_id, | ||
819 | auide_ddma_tx_callback, | ||
820 | (void*)auide); | ||
821 | /* Get a channel for RX */ | ||
822 | auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id, | ||
823 | auide->target_dev_id, | ||
824 | auide_ddma_rx_callback, | ||
825 | (void*)auide); | ||
826 | #else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */ | ||
827 | /* | ||
828 | * Note: if call back is not enabled, update ctp->cur_ptr manually | ||
829 | */ | ||
830 | auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS, | ||
831 | auide->tx_dev_id, | ||
832 | NULL, | ||
833 | (void*)auide); | ||
834 | auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id, | ||
835 | DSCR_CMD0_ALWAYS, | ||
836 | NULL, | ||
837 | (void*)auide); | ||
838 | #endif | 599 | #endif |
839 | auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan, | ||
840 | NUM_DESCRIPTORS); | ||
841 | auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan, | ||
842 | NUM_DESCRIPTORS); | ||
843 | 600 | ||
844 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 601 | /* setup dev_tab for tx channel */ |
845 | hwif->dmatable_cpu = dma_alloc_coherent(auide->dev, | 602 | auide_init_dbdma_dev( &source_dev_tab, |
846 | PRD_ENTRIES * PRD_BYTES, /* 1 Page */ | 603 | (u32)DSCR_CMD0_ALWAYS, |
847 | &hwif->dmatable_dma, GFP_KERNEL); | 604 | 8, 32, DEV_FLAGS_OUT | flags); |
848 | 605 | auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); | |
849 | auide->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES, | 606 | |
850 | GFP_KERNEL|GFP_DMA); | 607 | auide_init_dbdma_dev( &source_dev_tab, |
851 | if (auide->sg_table == NULL) { | 608 | (u32)DSCR_CMD0_ALWAYS, |
852 | return -ENOMEM; | 609 | 8, 32, DEV_FLAGS_IN | flags); |
853 | } | 610 | auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); |
854 | #endif | 611 | |
855 | au1xxx_dbdma_start( auide->tx_chan ); | 612 | /* Get a channel for TX */ |
856 | au1xxx_dbdma_start( auide->rx_chan ); | 613 | auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS, |
857 | return 0; | 614 | auide->tx_dev_id, |
615 | NULL, | ||
616 | (void*)auide); | ||
617 | |||
618 | /* Get a channel for RX */ | ||
619 | auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id, | ||
620 | DSCR_CMD0_ALWAYS, | ||
621 | NULL, | ||
622 | (void*)auide); | ||
623 | |||
624 | auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan, | ||
625 | NUM_DESCRIPTORS); | ||
626 | auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan, | ||
627 | NUM_DESCRIPTORS); | ||
628 | |||
629 | au1xxx_dbdma_start( auide->tx_chan ); | ||
630 | au1xxx_dbdma_start( auide->rx_chan ); | ||
631 | |||
632 | return 0; | ||
858 | } | 633 | } |
634 | #endif | ||
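The new auide_init_dbdma_dev() helper in this hunk collapses the repeated dbdev_tab_t field assignments of the old code into one function that is called with different id/tsize/devwidth/flag combinations for the tx, rx and target devices. A standalone sketch of that refactor pattern is below; the struct layout, flag values and device id are illustrative only.

    /* Sketch of the dbdev_tab_t init helper introduced by this diff. */
    #include <stdio.h>

    #define DEV_FLAGS_OUT  0x1
    #define DEV_FLAGS_IN   0x2
    #define DEV_FLAGS_SYNC 0x4
    #define ATA_PHYS_ADDR  0x0C000000u   /* made-up base address */

    struct dbdev_tab {
            unsigned int dev_id, dev_physaddr, dev_tsize, dev_devwidth, dev_flags;
    };

    static void init_dbdma_dev(struct dbdev_tab *dev, unsigned int dev_id,
                               unsigned int tsize, unsigned int devwidth,
                               unsigned int flags)
    {
            dev->dev_id       = dev_id;
            dev->dev_physaddr = ATA_PHYS_ADDR;
            dev->dev_tsize    = tsize;
            dev->dev_devwidth = devwidth;
            dev->dev_flags    = flags;
    }

    int main(void)
    {
            struct dbdev_tab tx, rx;

            /* one helper, two directions -- mirrors the tx/rx setup in the diff */
            init_dbdma_dev(&tx, 42, 8, 32, DEV_FLAGS_OUT | DEV_FLAGS_SYNC);
            init_dbdma_dev(&rx, 42, 8, 32, DEV_FLAGS_IN  | DEV_FLAGS_SYNC);

            printf("tx flags 0x%x, rx flags 0x%x\n", tx.dev_flags, rx.dev_flags);
            return 0;
    }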
859 | 635 | ||
860 | static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) | 636 | static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) |
861 | { | 637 | { |
862 | int i; | 638 | int i; |
863 | #define ide_ioreg_t unsigned long | 639 | unsigned long *ata_regs = hw->io_ports; |
864 | ide_ioreg_t *ata_regs = hw->io_ports; | 640 | |
865 | 641 | /* FIXME? */ | |
866 | /* fixme */ | 642 | for (i = 0; i < IDE_CONTROL_OFFSET; i++) { |
867 | for (i = 0; i < IDE_CONTROL_OFFSET; i++) { | 643 | *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET); |
868 | *ata_regs++ = (ide_ioreg_t) ahwif->regbase | 644 | } |
869 | + (ide_ioreg_t)(i << AU1XXX_ATA_REG_OFFSET); | 645 | |
870 | } | 646 | /* set the Alternative Status register */ |
871 | 647 | *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET); | |
872 | /* set the Alternative Status register */ | ||
873 | *ata_regs = (ide_ioreg_t) ahwif->regbase | ||
874 | + (ide_ioreg_t)(14 << AU1XXX_ATA_REG_OFFSET); | ||
875 | } | 648 | } |
876 | 649 | ||
877 | static int au_ide_probe(struct device *dev) | 650 | static int au_ide_probe(struct device *dev) |
878 | { | 651 | { |
879 | struct platform_device *pdev = to_platform_device(dev); | 652 | struct platform_device *pdev = to_platform_device(dev); |
880 | _auide_hwif *ahwif = &auide_hwif; | 653 | _auide_hwif *ahwif = &auide_hwif; |
881 | ide_hwif_t *hwif; | 654 | ide_hwif_t *hwif; |
882 | struct resource *res; | 655 | struct resource *res; |
883 | int ret = 0; | 656 | int ret = 0; |
884 | 657 | ||
885 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) | 658 | #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) |
886 | char *mode = "MWDMA2"; | 659 | char *mode = "MWDMA2"; |
887 | #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) | 660 | #elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA) |
888 | char *mode = "PIO+DDMA(offload)"; | 661 | char *mode = "PIO+DDMA(offload)"; |
889 | #endif | 662 | #endif |
890 | 663 | ||
891 | memset(&auide_hwif, 0, sizeof(_auide_hwif)); | 664 | memset(&auide_hwif, 0, sizeof(_auide_hwif)); |
892 | auide_hwif.dev = 0; | 665 | auide_hwif.dev = 0; |
893 | 666 | ||
894 | ahwif->dev = dev; | 667 | ahwif->dev = dev; |
895 | ahwif->irq = platform_get_irq(pdev, 0); | 668 | ahwif->irq = platform_get_irq(pdev, 0); |
@@ -902,11 +675,11 @@ static int au_ide_probe(struct device *dev) | |||
902 | goto out; | 675 | goto out; |
903 | } | 676 | } |
904 | 677 | ||
905 | if (!request_mem_region (res->start, res->end-res->start, pdev->name)) { | 678 | if (!request_mem_region (res->start, res->end-res->start, pdev->name)) { |
906 | pr_debug("%s: request_mem_region failed\n", DRV_NAME); | 679 | pr_debug("%s: request_mem_region failed\n", DRV_NAME); |
907 | ret = -EBUSY; | 680 | ret = -EBUSY; |
908 | goto out; | 681 | goto out; |
909 | } | 682 | } |
910 | 683 | ||
911 | ahwif->regbase = (u32)ioremap(res->start, res->end-res->start); | 684 | ahwif->regbase = (u32)ioremap(res->start, res->end-res->start); |
912 | if (ahwif->regbase == 0) { | 685 | if (ahwif->regbase == 0) { |
@@ -914,130 +687,92 @@ static int au_ide_probe(struct device *dev) | |||
914 | goto out; | 687 | goto out; |
915 | } | 688 | } |
916 | 689 | ||
917 | hwif = &ide_hwifs[pdev->id]; | 690 | /* FIXME: This might possibly break PCMCIA IDE devices */ |
691 | |||
692 | hwif = &ide_hwifs[pdev->id]; | ||
918 | hw_regs_t *hw = &hwif->hw; | 693 | hw_regs_t *hw = &hwif->hw; |
919 | hwif->irq = hw->irq = ahwif->irq; | 694 | hwif->irq = hw->irq = ahwif->irq; |
920 | hwif->chipset = ide_au1xxx; | 695 | hwif->chipset = ide_au1xxx; |
921 | 696 | ||
922 | auide_setup_ports(hw, ahwif); | 697 | auide_setup_ports(hw, ahwif); |
923 | memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); | 698 | memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); |
924 | 699 | ||
925 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ | 700 | hwif->ultra_mask = 0x0; /* Disable Ultra DMA */ |
926 | hwif->rqsize = CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ; | ||
927 | hwif->rqsize = ((hwif->rqsize > AU1XXX_ATA_RQSIZE) | ||
928 | || (hwif->rqsize < 32)) ? AU1XXX_ATA_RQSIZE : hwif->rqsize; | ||
929 | #else /* if kernel config is not set */ | ||
930 | hwif->rqsize = AU1XXX_ATA_RQSIZE; | ||
931 | #endif | ||
932 | |||
933 | hwif->ultra_mask = 0x0; /* Disable Ultra DMA */ | ||
934 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 701 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
935 | hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */ | 702 | hwif->mwdma_mask = 0x07; /* Multimode-2 DMA */ |
936 | hwif->swdma_mask = 0x07; | 703 | hwif->swdma_mask = 0x00; |
937 | #else | 704 | #else |
938 | hwif->mwdma_mask = 0x0; | 705 | hwif->mwdma_mask = 0x0; |
939 | hwif->swdma_mask = 0x0; | 706 | hwif->swdma_mask = 0x0; |
707 | #endif | ||
708 | |||
709 | hwif->noprobe = 0; | ||
710 | hwif->drives[0].unmask = 1; | ||
711 | hwif->drives[1].unmask = 1; | ||
712 | |||
713 | /* hold should be on in all cases */ | ||
714 | hwif->hold = 1; | ||
715 | hwif->mmio = 2; | ||
716 | |||
717 | /* If the user has selected DDMA assisted copies, | ||
718 | then set up a few local I/O function entry points | ||
719 | */ | ||
720 | |||
721 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA | ||
722 | hwif->INSW = auide_insw; | ||
723 | hwif->OUTSW = auide_outsw; | ||
940 | #endif | 724 | #endif |
941 | //hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET]; | 725 | |
942 | hwif->noprobe = 0; | 726 | hwif->tuneproc = &auide_tune_drive; |
943 | hwif->drives[0].unmask = 1; | 727 | hwif->speedproc = &auide_tune_chipset; |
944 | hwif->drives[1].unmask = 1; | ||
945 | |||
946 | /* hold should be on in all cases */ | ||
947 | hwif->hold = 1; | ||
948 | hwif->mmio = 2; | ||
949 | |||
950 | /* set up local I/O function entry points */ | ||
951 | hwif->INB = auide_inb; | ||
952 | hwif->INW = auide_inw; | ||
953 | hwif->INL = auide_inl; | ||
954 | hwif->INSW = auide_insw; | ||
955 | hwif->INSL = auide_insl; | ||
956 | hwif->OUTB = auide_outb; | ||
957 | hwif->OUTBSYNC = auide_outbsync; | ||
958 | hwif->OUTW = auide_outw; | ||
959 | hwif->OUTL = auide_outl; | ||
960 | hwif->OUTSW = auide_outsw; | ||
961 | hwif->OUTSL = auide_outsl; | ||
962 | |||
963 | hwif->tuneproc = &auide_tune_drive; | ||
964 | hwif->speedproc = &auide_tune_chipset; | ||
965 | 728 | ||
966 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA | 729 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA |
967 | hwif->ide_dma_off_quietly = &auide_dma_off_quietly; | 730 | hwif->ide_dma_off_quietly = &auide_dma_off_quietly; |
968 | hwif->ide_dma_timeout = &auide_dma_timeout; | 731 | hwif->ide_dma_timeout = &auide_dma_timeout; |
969 | 732 | ||
970 | hwif->ide_dma_check = &auide_dma_check; | 733 | hwif->ide_dma_check = &auide_dma_check; |
971 | hwif->dma_exec_cmd = &auide_dma_exec_cmd; | 734 | hwif->dma_exec_cmd = &auide_dma_exec_cmd; |
972 | hwif->dma_start = &auide_dma_start; | 735 | hwif->dma_start = &auide_dma_start; |
973 | hwif->ide_dma_end = &auide_dma_end; | 736 | hwif->ide_dma_end = &auide_dma_end; |
974 | hwif->dma_setup = &auide_dma_setup; | 737 | hwif->dma_setup = &auide_dma_setup; |
975 | hwif->ide_dma_test_irq = &auide_dma_test_irq; | 738 | hwif->ide_dma_test_irq = &auide_dma_test_irq; |
976 | hwif->ide_dma_host_off = &auide_dma_host_off; | 739 | hwif->ide_dma_host_off = &auide_dma_host_off; |
977 | hwif->ide_dma_host_on = &auide_dma_host_on; | 740 | hwif->ide_dma_host_on = &auide_dma_host_on; |
978 | hwif->ide_dma_lostirq = &auide_dma_lostirq; | 741 | hwif->ide_dma_lostirq = &auide_dma_lostirq; |
979 | hwif->ide_dma_on = &auide_dma_on; | 742 | hwif->ide_dma_on = &auide_dma_on; |
980 | 743 | ||
981 | hwif->autodma = 1; | 744 | hwif->autodma = 1; |
982 | hwif->drives[0].autodma = hwif->autodma; | 745 | hwif->drives[0].autodma = hwif->autodma; |
983 | hwif->drives[1].autodma = hwif->autodma; | 746 | hwif->drives[1].autodma = hwif->autodma; |
984 | hwif->atapi_dma = 1; | 747 | hwif->atapi_dma = 1; |
985 | hwif->drives[0].using_dma = 1; | 748 | |
986 | hwif->drives[1].using_dma = 1; | ||
987 | #else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ | 749 | #else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ |
988 | hwif->autodma = 0; | 750 | hwif->autodma = 0; |
989 | hwif->channel = 0; | 751 | hwif->channel = 0; |
990 | hwif->hold = 1; | 752 | hwif->hold = 1; |
991 | hwif->select_data = 0; /* no chipset-specific code */ | 753 | hwif->select_data = 0; /* no chipset-specific code */ |
992 | hwif->config_data = 0; /* no chipset-specific code */ | 754 | hwif->config_data = 0; /* no chipset-specific code */ |
993 | 755 | ||
994 | hwif->drives[0].autodma = 0; | 756 | hwif->drives[0].autodma = 0; |
995 | hwif->drives[0].drive_data = 0; /* no drive data */ | 757 | hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */ |
996 | hwif->drives[0].using_dma = 0; | ||
997 | hwif->drives[0].waiting_for_dma = 0; | ||
998 | hwif->drives[0].autotune = 1; /* 1=autotune, 2=noautotune, 0=default */ | ||
999 | /* secondary hdd not supported */ | ||
1000 | hwif->drives[1].autodma = 0; | ||
1001 | |||
1002 | hwif->drives[1].drive_data = 0; | ||
1003 | hwif->drives[1].using_dma = 0; | ||
1004 | hwif->drives[1].waiting_for_dma = 0; | ||
1005 | hwif->drives[1].autotune = 2; /* 1=autotune, 2=noautotune, 0=default */ | ||
1006 | #endif | ||
1007 | hwif->drives[0].io_32bit = 0; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */ | ||
1008 | hwif->drives[1].io_32bit = 0; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */ | ||
1009 | |||
1010 | /*Register Driver with PM Framework*/ | ||
1011 | #ifdef CONFIG_PM | ||
1012 | auide_hwif.pm.lock = SPIN_LOCK_UNLOCKED; | ||
1013 | auide_hwif.pm.stopped = 0; | ||
1014 | |||
1015 | auide_hwif.pm.dev = new_au1xxx_power_device( "ide", | ||
1016 | &au1200ide_pm_callback, | ||
1017 | NULL); | ||
1018 | if ( auide_hwif.pm.dev == NULL ) | ||
1019 | printk(KERN_INFO "Unable to create a power management \ | ||
1020 | device entry for the au1200-IDE.\n"); | ||
1021 | else | ||
1022 | printk(KERN_INFO "Power management device entry for the \ | ||
1023 | au1200-IDE loaded.\n"); | ||
1024 | #endif | 758 | #endif |
759 | hwif->drives[0].no_io_32bit = 1; | ||
1025 | 760 | ||
1026 | auide_hwif.hwif = hwif; | 761 | auide_hwif.hwif = hwif; |
1027 | hwif->hwif_data = &auide_hwif; | 762 | hwif->hwif_data = &auide_hwif; |
1028 | 763 | ||
1029 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA | 764 | #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA |
1030 | auide_ddma_init(&auide_hwif); | 765 | auide_ddma_init(&auide_hwif); |
1031 | dbdma_init_done = 1; | 766 | dbdma_init_done = 1; |
1032 | #endif | 767 | #endif |
1033 | 768 | ||
1034 | probe_hwif_init(hwif); | 769 | probe_hwif_init(hwif); |
1035 | dev_set_drvdata(dev, hwif); | 770 | dev_set_drvdata(dev, hwif); |
1036 | 771 | ||
1037 | printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); | 772 | printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); |
1038 | 773 | ||
1039 | out: | 774 | out: |
1040 | return ret; | 775 | return ret; |
1041 | } | 776 | } |
1042 | 777 | ||
1043 | static int au_ide_remove(struct device *dev) | 778 | static int au_ide_remove(struct device *dev) |
@@ -1045,7 +780,7 @@ static int au_ide_remove(struct device *dev) | |||
1045 | struct platform_device *pdev = to_platform_device(dev); | 780 | struct platform_device *pdev = to_platform_device(dev); |
1046 | struct resource *res; | 781 | struct resource *res; |
1047 | ide_hwif_t *hwif = dev_get_drvdata(dev); | 782 | ide_hwif_t *hwif = dev_get_drvdata(dev); |
1048 | _auide_hwif *ahwif = &auide_hwif; | 783 | _auide_hwif *ahwif = &auide_hwif; |
1049 | 784 | ||
1050 | ide_unregister(hwif - ide_hwifs); | 785 | ide_unregister(hwif - ide_hwifs); |
1051 | 786 | ||
@@ -1069,180 +804,11 @@ static int __init au_ide_init(void) | |||
1069 | return driver_register(&au1200_ide_driver); | 804 | return driver_register(&au1200_ide_driver); |
1070 | } | 805 | } |
1071 | 806 | ||
1072 | static void __init au_ide_exit(void) | 807 | static void __exit au_ide_exit(void) |
1073 | { | 808 | { |
1074 | driver_unregister(&au1200_ide_driver); | 809 | driver_unregister(&au1200_ide_driver); |
1075 | } | 810 | } |
1076 | 811 | ||
1077 | #ifdef CONFIG_PM | ||
1078 | int au1200ide_pm_callback( au1xxx_power_dev_t *dev,\ | ||
1079 | au1xxx_request_t request, void *data) { | ||
1080 | |||
1081 | unsigned int d, err = 0; | ||
1082 | unsigned long flags; | ||
1083 | |||
1084 | spin_lock_irqsave(auide_hwif.pm.lock, flags); | ||
1085 | |||
1086 | switch (request){ | ||
1087 | case AU1XXX_PM_SLEEP: | ||
1088 | err = au1xxxide_pm_sleep(dev); | ||
1089 | break; | ||
1090 | case AU1XXX_PM_WAKEUP: | ||
1091 | d = *((unsigned int*)data); | ||
1092 | if ( d > 0 && d <= 99) { | ||
1093 | err = au1xxxide_pm_standby(dev); | ||
1094 | } | ||
1095 | else { | ||
1096 | err = au1xxxide_pm_resume(dev); | ||
1097 | } | ||
1098 | break; | ||
1099 | case AU1XXX_PM_GETSTATUS: | ||
1100 | err = au1xxxide_pm_getstatus(dev); | ||
1101 | break; | ||
1102 | case AU1XXX_PM_ACCESS: | ||
1103 | err = au1xxxide_pm_access(dev); | ||
1104 | break; | ||
1105 | case AU1XXX_PM_IDLE: | ||
1106 | err = au1xxxide_pm_idle(dev); | ||
1107 | break; | ||
1108 | case AU1XXX_PM_CLEANUP: | ||
1109 | err = au1xxxide_pm_cleanup(dev); | ||
1110 | break; | ||
1111 | default: | ||
1112 | err = -1; | ||
1113 | break; | ||
1114 | } | ||
1115 | |||
1116 | spin_unlock_irqrestore(auide_hwif.pm.lock, flags); | ||
1117 | |||
1118 | return err; | ||
1119 | } | ||
1120 | |||
1121 | static int au1xxxide_pm_standby( au1xxx_power_dev_t *dev ) { | ||
1122 | return 0; | ||
1123 | } | ||
1124 | |||
1125 | static int au1xxxide_pm_sleep( au1xxx_power_dev_t *dev ) { | ||
1126 | |||
1127 | int retval; | ||
1128 | ide_hwif_t *hwif = auide_hwif.hwif; | ||
1129 | struct request rq; | ||
1130 | struct request_pm_state rqpm; | ||
1131 | ide_task_t args; | ||
1132 | |||
1133 | if(auide_hwif.pm.stopped) | ||
1134 | return -1; | ||
1135 | |||
1136 | /* | ||
1137 | * wait until hard disc is ready | ||
1138 | */ | ||
1139 | if ( wait_for_ready(&hwif->drives[0], 35000) ) { | ||
1140 | printk("Wait for drive sleep timeout!\n"); | ||
1141 | retval = -1; | ||
1142 | } | ||
1143 | |||
1144 | /* | ||
1145 | * sequenz to tell the high level ide driver that pm is resuming | ||
1146 | */ | ||
1147 | memset(&rq, 0, sizeof(rq)); | ||
1148 | memset(&rqpm, 0, sizeof(rqpm)); | ||
1149 | memset(&args, 0, sizeof(args)); | ||
1150 | rq.flags = REQ_PM_SUSPEND; | ||
1151 | rq.special = &args; | ||
1152 | rq.pm = &rqpm; | ||
1153 | rqpm.pm_step = ide_pm_state_start_suspend; | ||
1154 | rqpm.pm_state = PMSG_SUSPEND; | ||
1155 | |||
1156 | retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_wait); | ||
1157 | |||
1158 | if (wait_for_ready (&hwif->drives[0], 35000)) { | ||
1159 | printk("Wait for drive sleep timeout!\n"); | ||
1160 | retval = -1; | ||
1161 | } | ||
1162 | |||
1163 | /* | ||
1164 | * stop dbdma channels | ||
1165 | */ | ||
1166 | au1xxx_dbdma_reset(auide_hwif.tx_chan); | ||
1167 | au1xxx_dbdma_reset(auide_hwif.rx_chan); | ||
1168 | |||
1169 | auide_hwif.pm.stopped = 1; | ||
1170 | |||
1171 | return retval; | ||
1172 | } | ||
1173 | |||
1174 | static int au1xxxide_pm_resume( au1xxx_power_dev_t *dev ) { | ||
1175 | |||
1176 | int retval; | ||
1177 | ide_hwif_t *hwif = auide_hwif.hwif; | ||
1178 | struct request rq; | ||
1179 | struct request_pm_state rqpm; | ||
1180 | ide_task_t args; | ||
1181 | |||
1182 | if(!auide_hwif.pm.stopped) | ||
1183 | return -1; | ||
1184 | |||
1185 | /* | ||
1186 | * start dbdma channels | ||
1187 | */ | ||
1188 | au1xxx_dbdma_start(auide_hwif.tx_chan); | ||
1189 | au1xxx_dbdma_start(auide_hwif.rx_chan); | ||
1190 | |||
1191 | /* | ||
1192 | * wait until hard disc is ready | ||
1193 | */ | ||
1194 | if (wait_for_ready ( &hwif->drives[0], 35000)) { | ||
1195 | printk("Wait for drive wake up timeout!\n"); | ||
1196 | retval = -1; | ||
1197 | } | ||
1198 | |||
1199 | /* | ||
1200 | * sequenz to tell the high level ide driver that pm is resuming | ||
1201 | */ | ||
1202 | memset(&rq, 0, sizeof(rq)); | ||
1203 | memset(&rqpm, 0, sizeof(rqpm)); | ||
1204 | memset(&args, 0, sizeof(args)); | ||
1205 | rq.flags = REQ_PM_RESUME; | ||
1206 | rq.special = &args; | ||
1207 | rq.pm = &rqpm; | ||
1208 | rqpm.pm_step = ide_pm_state_start_resume; | ||
1209 | rqpm.pm_state = PMSG_ON; | ||
1210 | |||
1211 | retval = ide_do_drive_cmd(&hwif->drives[0], &rq, ide_head_wait); | ||
1212 | |||
1213 | /* | ||
1214 | * wait for hard disc | ||
1215 | */ | ||
1216 | if ( wait_for_ready(&hwif->drives[0], 35000) ) { | ||
1217 | printk("Wait for drive wake up timeout!\n"); | ||
1218 | retval = -1; | ||
1219 | } | ||
1220 | |||
1221 | auide_hwif.pm.stopped = 0; | ||
1222 | |||
1223 | return retval; | ||
1224 | } | ||
1225 | |||
1226 | static int au1xxxide_pm_getstatus( au1xxx_power_dev_t *dev ) { | ||
1227 | return dev->cur_state; | ||
1228 | } | ||
1229 | |||
1230 | static int au1xxxide_pm_access( au1xxx_power_dev_t *dev ) { | ||
1231 | if (dev->cur_state != AWAKE_STATE) | ||
1232 | return 0; | ||
1233 | else | ||
1234 | return -1; | ||
1235 | } | ||
1236 | |||
1237 | static int au1xxxide_pm_idle( au1xxx_power_dev_t *dev ) { | ||
1238 | return 0; | ||
1239 | } | ||
1240 | |||
1241 | static int au1xxxide_pm_cleanup( au1xxx_power_dev_t *dev ) { | ||
1242 | return 0; | ||
1243 | } | ||
1244 | #endif /* CONFIG_PM */ | ||
1245 | |||
1246 | MODULE_LICENSE("GPL"); | 812 | MODULE_LICENSE("GPL"); |
1247 | MODULE_DESCRIPTION("AU1200 IDE driver"); | 813 | MODULE_DESCRIPTION("AU1200 IDE driver"); |
1248 | 814 | ||
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index af526b671c4e..4ee597d08797 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c | |||
@@ -622,12 +622,18 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d) | |||
622 | ide_hwif_t *hwif; | 622 | ide_hwif_t *hwif; |
623 | int h; | 623 | int h; |
624 | 624 | ||
625 | /* | ||
626 | * Find an empty HWIF; if none available, return -ENOMEM. | ||
627 | */ | ||
625 | for (h = 0; h < MAX_HWIFS; ++h) { | 628 | for (h = 0; h < MAX_HWIFS; ++h) { |
626 | hwif = &ide_hwifs[h]; | 629 | hwif = &ide_hwifs[h]; |
627 | /* Find an empty HWIF */ | ||
628 | if (hwif->chipset == ide_unknown) | 630 | if (hwif->chipset == ide_unknown) |
629 | break; | 631 | break; |
630 | } | 632 | } |
633 | if (h == MAX_HWIFS) { | ||
634 | printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", d->name); | ||
635 | return -ENOMEM; | ||
636 | } | ||
631 | 637 | ||
632 | /* Get the CmdBlk and CtrlBlk Base Registers */ | 638 | /* Get the CmdBlk and CtrlBlk Base Registers */ |
633 | base = pci_resource_start(dev, 0) + IOC4_CMD_OFFSET; | 639 | base = pci_resource_start(dev, 0) + IOC4_CMD_OFFSET; |
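The sgiioc4 hunk above bounds the "find an empty HWIF" scan: when the table is full it now prints an error and returns -ENOMEM instead of falling through with the last slot. A small standalone sketch of that scan-or-fail pattern follows; the array, marker enum and table size are simplified stand-ins.

    /* Sketch of the bounded empty-slot scan added to sgiioc4_ide_setup_pci_device(). */
    #include <stdio.h>
    #include <errno.h>

    #define MAX_HWIFS 4

    enum chipset { ide_unknown = 0, ide_taken };

    static enum chipset hwifs[MAX_HWIFS] = { ide_taken, ide_taken, ide_unknown, ide_taken };

    static int find_free_hwif(void)
    {
            int h;

            for (h = 0; h < MAX_HWIFS; ++h)
                    if (hwifs[h] == ide_unknown)
                            break;

            if (h == MAX_HWIFS) {
                    printf("too many IDE interfaces, no room in table\n");
                    return -ENOMEM;
            }
            return h;
    }

    int main(void)
    {
            printf("free slot: %d\n", find_free_hwif());
            return 0;
    }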
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c index 7161ce0ef5aa..86fb1e0286d3 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/pci/via82cxxx.c | |||
@@ -80,6 +80,7 @@ static struct via_isa_bridge { | |||
80 | u16 flags; | 80 | u16 flags; |
81 | } via_isa_bridges[] = { | 81 | } via_isa_bridges[] = { |
82 | { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, | 82 | { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, |
83 | { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, | ||
83 | { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, | 84 | { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, |
84 | { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, | 85 | { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, |
85 | { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, | 86 | { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, |