author    Tejun Heo <htejun@gmail.com>    2006-01-06 03:57:31 -0500
committer Jens Axboe <axboe@suse.de>     2006-01-06 03:57:31 -0500
commit    3e087b575496b8aa445192f58e7d996b1cdfa121
tree      6ee355645e199a7e5c9aeae11c3143a8235d2a81
parent    9a3dccc42556537a48f39ee9a9e7ab90a933f766
[BLOCK] update IDE to use new blk_ordered for barriers
Update IDE to use the new blk_ordered. This makes the following behavior changes:

* Partial completion of a barrier request is handled as failure of the whole
  ordered sequence. There is no more partial completion for barrier requests.

* Any failure of a pre- or post-flush request results in failure of the whole
  ordered sequence. So, a successfully completed ordered sequence guarantees
  that all requests prior to the barrier made it to the physical medium and,
  then, the whole barrier request made it to the physical medium.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
-rw-r--r--    drivers/ide/ide-disk.c    137
-rw-r--r--    drivers/ide/ide-io.c        5
2 files changed, 54 insertions(+), 88 deletions(-)
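For driver authors, the practical upshot of the new interface is visible in the
ide-disk changes below: instead of returning a status from the prepare-flush
callback and hooking end_flush_fn by hand, a driver now just picks an ordered
mode and hands blk_queue_ordered() a prepare callback. A minimal sketch of that
wiring, modeled on idedisk_prepare_flush() and update_ordered() from this patch
(my_prepare_flush and my_init_queue are illustrative names, not part of the
kernel):

static void my_prepare_flush(request_queue_t *q, struct request *rq)
{
        /* Turn rq into this device's cache-flush command.  Note the
         * void return: the old int return value and the manual
         * REQ_SOFTBARRIER flagging are gone with the new interface. */
        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd[0] = WIN_FLUSH_CACHE;
        rq->flags |= REQ_DRIVE_TASK;
        rq->buffer = rq->cmd;
}

static void my_init_queue(request_queue_t *q)
{
        /* Drain the queue around each barrier and bracket the barrier
         * with flushes; with the new blk_ordered, any flush failure
         * fails the whole ordered sequence. */
        blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
}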
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4e5767968d7f..4b441720b6ba 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = {
 
 #endif	/* CONFIG_PROC_FS */
 
-static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq)
+static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 {
 	ide_drive_t *drive = q->queuedata;
-	struct request *rq = flush_rq->end_io_data;
-	int good_sectors = rq->hard_nr_sectors;
-	int bad_sectors;
-	sector_t sector;
-
-	if (flush_rq->errors & ABRT_ERR) {
-		printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
-		blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE);
-		blk_queue_issue_flush_fn(drive->queue, NULL);
-		good_sectors = 0;
-	} else if (flush_rq->errors) {
-		good_sectors = 0;
-		if (blk_barrier_preflush(rq)) {
-			sector = ide_get_error_location(drive,flush_rq->buffer);
-			if ((sector >= rq->hard_sector) &&
-			    (sector < rq->hard_sector + rq->hard_nr_sectors))
-				good_sectors = sector - rq->hard_sector;
-		}
-	}
-
-	if (flush_rq->errors)
-		printk(KERN_ERR "%s: failed barrier write: "
-		       "sector=%Lx(good=%d/bad=%d)\n",
-		       drive->name, (unsigned long long)rq->sector,
-		       good_sectors,
-		       (int) (rq->hard_nr_sectors-good_sectors));
-
-	bad_sectors = rq->hard_nr_sectors - good_sectors;
-
-	if (good_sectors)
-		__ide_end_request(drive, rq, 1, good_sectors);
-	if (bad_sectors)
-		__ide_end_request(drive, rq, 0, bad_sectors);
-}
-
-static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
-{
-	ide_drive_t *drive = q->queuedata;
-
-	if (!drive->wcache)
-		return 0;
 
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 
@@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 	rq->cmd[0] = WIN_FLUSH_CACHE;
 
 
-	rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER;
+	rq->flags |= REQ_DRIVE_TASK;
 	rq->buffer = rq->cmd;
-	return 1;
 }
 
 static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg)
 	return 0;
 }
 
+static void update_ordered(ide_drive_t *drive)
+{
+	struct hd_driveid *id = drive->id;
+	unsigned ordered = QUEUE_ORDERED_NONE;
+	prepare_flush_fn *prep_fn = NULL;
+	issue_flush_fn *issue_fn = NULL;
+
+	if (drive->wcache) {
+		unsigned long long capacity;
+		int barrier;
+		/*
+		 * We must avoid issuing commands a drive does not
+		 * understand or we may crash it. We check flush cache
+		 * is supported. We also check we have the LBA48 flush
+		 * cache if the drive capacity is too large. By this
+		 * time we have trimmed the drive capacity if LBA48 is
+		 * not available so we don't need to recheck that.
+		 */
+		capacity = idedisk_capacity(drive);
+		barrier = ide_id_has_flush_cache(id) &&
+			(drive->addressing == 0 || capacity <= (1ULL << 28) ||
+			 ide_id_has_flush_cache_ext(id));
+
+		printk(KERN_INFO "%s: cache flushes %ssupported\n",
+		       drive->name, barrier ? "" : "not ");
+
+		if (barrier) {
+			ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+			prep_fn = idedisk_prepare_flush;
+			issue_fn = idedisk_issue_flush;
+		}
+	} else
+		ordered = QUEUE_ORDERED_DRAIN;
+
+	blk_queue_ordered(drive->queue, ordered, prep_fn);
+	blk_queue_issue_flush_fn(drive->queue, issue_fn);
+}
+
 static int write_cache(ide_drive_t *drive, int arg)
 {
 	ide_task_t args;
-	int err;
-
-	if (!ide_id_has_flush_cache(drive->id))
-		return 1;
+	int err = 1;
 
-	memset(&args, 0, sizeof(ide_task_t));
-	args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
-			SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
-	args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
-	args.command_type = IDE_DRIVE_TASK_NO_DATA;
-	args.handler = &task_no_data_intr;
+	if (ide_id_has_flush_cache(drive->id)) {
+		memset(&args, 0, sizeof(ide_task_t));
+		args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
+			SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
+		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
+		args.command_type = IDE_DRIVE_TASK_NO_DATA;
+		args.handler = &task_no_data_intr;
+		err = ide_raw_taskfile(drive, &args, NULL);
+		if (err == 0)
+			drive->wcache = arg;
+	}
 
-	err = ide_raw_taskfile(drive, &args, NULL);
-	if (err)
-		return err;
+	update_ordered(drive);
 
-	drive->wcache = arg;
-	return 0;
+	return err;
 }
 
 static int do_idedisk_flushcache (ide_drive_t *drive)
@@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive)
 {
 	struct hd_driveid *id = drive->id;
 	unsigned long long capacity;
-	int barrier;
 
 	idedisk_add_settings(drive);
 
@@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive)
 	drive->wcache = 1;
 
 	write_cache(drive, 1);
-
-	/*
-	 * We must avoid issuing commands a drive does not understand
-	 * or we may crash it. We check flush cache is supported. We also
-	 * check we have the LBA48 flush cache if the drive capacity is
-	 * too large. By this time we have trimmed the drive capacity if
-	 * LBA48 is not available so we don't need to recheck that.
-	 */
-	barrier = 0;
-	if (ide_id_has_flush_cache(id))
-		barrier = 1;
-	if (drive->addressing == 1) {
-		/* Can't issue the correct flush ? */
-		if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id))
-			barrier = 0;
-	}
-
-	printk(KERN_INFO "%s: cache flushes %ssupported\n",
-	       drive->name, barrier ? "" : "not ");
-	if (barrier) {
-		blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH);
-		drive->queue->prepare_flush_fn = idedisk_prepare_flush;
-		drive->queue->end_flush_fn = idedisk_end_flush;
-		blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
-	}
 }
 
 static void ide_cacheflush_p(ide_drive_t *drive)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8435b44a700b..b5dc6df8e67d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -119,10 +119,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 	if (!nr_sectors)
 		nr_sectors = rq->hard_cur_sectors;
 
-	if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors))
-		ret = rq->nr_sectors != 0;
-	else
-		ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
+	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
 
 	spin_unlock_irqrestore(&ide_lock, flags);
 	return ret;