Diffstat (limited to 'drivers/ide/ide-disk.c')
-rw-r--r--	drivers/ide/ide-disk.c	137
1 file changed, 53 insertions, 84 deletions
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4e5767968d7f..4b441720b6ba 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = {
 
 #endif	/* CONFIG_PROC_FS */
 
-static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq)
+static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 {
 	ide_drive_t *drive = q->queuedata;
-	struct request *rq = flush_rq->end_io_data;
-	int good_sectors = rq->hard_nr_sectors;
-	int bad_sectors;
-	sector_t sector;
-
-	if (flush_rq->errors & ABRT_ERR) {
-		printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
-		blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE);
-		blk_queue_issue_flush_fn(drive->queue, NULL);
-		good_sectors = 0;
-	} else if (flush_rq->errors) {
-		good_sectors = 0;
-		if (blk_barrier_preflush(rq)) {
-			sector = ide_get_error_location(drive,flush_rq->buffer);
-			if ((sector >= rq->hard_sector) &&
-			    (sector < rq->hard_sector + rq->hard_nr_sectors))
-				good_sectors = sector - rq->hard_sector;
-		}
-	}
-
-	if (flush_rq->errors)
-		printk(KERN_ERR "%s: failed barrier write: "
-				"sector=%Lx(good=%d/bad=%d)\n",
-				drive->name, (unsigned long long)rq->sector,
-				good_sectors,
-				(int) (rq->hard_nr_sectors-good_sectors));
-
-	bad_sectors = rq->hard_nr_sectors - good_sectors;
-
-	if (good_sectors)
-		__ide_end_request(drive, rq, 1, good_sectors);
-	if (bad_sectors)
-		__ide_end_request(drive, rq, 0, bad_sectors);
-}
-
-static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
-{
-	ide_drive_t *drive = q->queuedata;
-
-	if (!drive->wcache)
-		return 0;
 
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 
@@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 		rq->cmd[0] = WIN_FLUSH_CACHE;
 
 
-	rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER;
+	rq->flags |= REQ_DRIVE_TASK;
 	rq->buffer = rq->cmd;
-	return 1;
 }
 
 static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg)
 	return 0;
 }
 
+static void update_ordered(ide_drive_t *drive)
+{
+	struct hd_driveid *id = drive->id;
+	unsigned ordered = QUEUE_ORDERED_NONE;
+	prepare_flush_fn *prep_fn = NULL;
+	issue_flush_fn *issue_fn = NULL;
+
+	if (drive->wcache) {
+		unsigned long long capacity;
+		int barrier;
+		/*
+		 * We must avoid issuing commands a drive does not
+		 * understand or we may crash it. We check flush cache
+		 * is supported. We also check we have the LBA48 flush
+		 * cache if the drive capacity is too large. By this
+		 * time we have trimmed the drive capacity if LBA48 is
+		 * not available so we don't need to recheck that.
+		 */
+		capacity = idedisk_capacity(drive);
+		barrier = ide_id_has_flush_cache(id) &&
+			(drive->addressing == 0 || capacity <= (1ULL << 28) ||
+			 ide_id_has_flush_cache_ext(id));
+
+		printk(KERN_INFO "%s: cache flushes %ssupported\n",
+		       drive->name, barrier ? "" : "not ");
+
+		if (barrier) {
+			ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+			prep_fn = idedisk_prepare_flush;
+			issue_fn = idedisk_issue_flush;
+		}
+	} else
+		ordered = QUEUE_ORDERED_DRAIN;
+
+	blk_queue_ordered(drive->queue, ordered, prep_fn);
+	blk_queue_issue_flush_fn(drive->queue, issue_fn);
+}
+
 static int write_cache(ide_drive_t *drive, int arg)
 {
 	ide_task_t args;
-	int err;
-
-	if (!ide_id_has_flush_cache(drive->id))
-		return 1;
+	int err = 1;
 
-	memset(&args, 0, sizeof(ide_task_t));
-	args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
+	if (ide_id_has_flush_cache(drive->id)) {
+		memset(&args, 0, sizeof(ide_task_t));
+		args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
 			SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
-	args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
-	args.command_type = IDE_DRIVE_TASK_NO_DATA;
-	args.handler = &task_no_data_intr;
+		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
+		args.command_type = IDE_DRIVE_TASK_NO_DATA;
+		args.handler = &task_no_data_intr;
+		err = ide_raw_taskfile(drive, &args, NULL);
+		if (err == 0)
+			drive->wcache = arg;
+	}
 
-	err = ide_raw_taskfile(drive, &args, NULL);
-	if (err)
-		return err;
+	update_ordered(drive);
 
-	drive->wcache = arg;
-	return 0;
+	return err;
 }
 
 static int do_idedisk_flushcache (ide_drive_t *drive)
@@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive)
 {
 	struct hd_driveid *id = drive->id;
 	unsigned long long capacity;
-	int barrier;
 
 	idedisk_add_settings(drive);
 
@@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive)
 		drive->wcache = 1;
 
 	write_cache(drive, 1);
-
-	/*
-	 * We must avoid issuing commands a drive does not understand
-	 * or we may crash it. We check flush cache is supported. We also
-	 * check we have the LBA48 flush cache if the drive capacity is
-	 * too large. By this time we have trimmed the drive capacity if
-	 * LBA48 is not available so we don't need to recheck that.
-	 */
-	barrier = 0;
-	if (ide_id_has_flush_cache(id))
-		barrier = 1;
-	if (drive->addressing == 1) {
-		/* Can't issue the correct flush ? */
-		if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id))
-			barrier = 0;
-	}
-
-	printk(KERN_INFO "%s: cache flushes %ssupported\n",
-		drive->name, barrier ? "" : "not ");
-	if (barrier) {
-		blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH);
-		drive->queue->prepare_flush_fn = idedisk_prepare_flush;
-		drive->queue->end_flush_fn = idedisk_end_flush;
-		blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
-	}
 }
 
 static void ide_cacheflush_p(ide_drive_t *drive)
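
Note (not part of the patch): the core of this change is that barrier setup moves from idedisk_setup() into update_ordered(), and the driver now registers a prepare_flush_fn with the three-argument blk_queue_ordered() instead of installing prepare_flush_fn/end_flush_fn on the queue directly. The sketch below is a minimal illustration of that registration pattern under the 2.6.x barrier API shown in the diff; example_setup_barriers and its wcache_and_flush_ok parameter are hypothetical names, while the queue calls and QUEUE_ORDERED_* modes are the ones used by update_ordered() above.

	/* Illustrative only: choose an ordered mode from the write-cache state
	 * and hand it to the block layer, mirroring what update_ordered() does. */
	static void example_setup_barriers(ide_drive_t *drive, int wcache_and_flush_ok)
	{
		if (wcache_and_flush_ok) {
			/* drain the queue, then issue the prepared FLUSH CACHE command */
			blk_queue_ordered(drive->queue, QUEUE_ORDERED_DRAIN_FLUSH,
					  idedisk_prepare_flush);
			blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
		} else {
			/* write cache disabled: draining the queue alone is enough */
			blk_queue_ordered(drive->queue, QUEUE_ORDERED_DRAIN, NULL);
			blk_queue_issue_flush_fn(drive->queue, NULL);
		}
	}

Because write_cache() now ends by calling update_ordered(), the ordered mode is re-evaluated whenever the write-cache setting changes, rather than only once at setup time.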