author		Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>	2009-01-02 10:12:50 -0500
committer	Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>	2009-01-02 10:12:50 -0500
commit		201bffa46466b4afdf7d29db8eca3fa5decb39c8
tree		47e7d85563690547b67748092e587be1f31046b5
parent		631de3708d595d153e8a510a3608689290f4c0ed
ide: use per-device request queue locks (v2)
* Move hack for flush requests from choose_drive() to do_ide_request().
* Add ide_plug_device() helper and convert the core IDE code from using the
  per-hwgroup lock as the request lock to using the per-device ->queue_lock
  instead.
* Remove no longer needed:
- choose_drive() function
- WAKEUP() macro
- 'sleeping' flag from ide_hwgroup_t
- 'service_{start,time}' fields from ide_drive_t
This patch results in much simpler and more maintainable code
(besides being a scalability improvement).
v2:
* Fixes/improvements based on review from Elias:
- take as many requests off the queue as possible
- remove now redundant BUG_ON()
Cc: Elias Oltmanns <eo@nebensachen.de>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
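With this conversion the block layer calls do_ide_request() with the per-device
q->queue_lock held, and the driver switches over to the shared hwgroup lock (and
back) inside the request function; queue plugging stays under q->queue_lock,
while hwgroup->lock only serializes the shared hardware and the ->busy flag.
A minimal sketch of that entry/exit protocol (illustration only; example_request_fn
is a made-up name, not the actual driver code):

#include <linux/ide.h>	/* 2.6.28-era IDE/block-layer interfaces assumed */

static void example_request_fn(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;
	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;

	/* entered with the per-device queue lock held by the block layer */
	spin_unlock_irq(q->queue_lock);
	spin_lock_irq(&hwgroup->lock);

	/* ... pick up and start requests for this drive ... */

	/* the per-device request lock must be held again when we return */
	spin_unlock_irq(&hwgroup->lock);
	spin_lock_irq(q->queue_lock);
}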
-rw-r--r--	drivers/ide/ide-io.c	| 214
-rw-r--r--	drivers/ide/ide-park.c	|  13
-rw-r--r--	drivers/ide/ide-probe.c	|   3
-rw-r--r--	include/linux/ide.h	|   4
4 files changed, 77 insertions(+), 157 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index ab480042757a..bb3248abf47d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -667,85 +667,10 @@ void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
 	drive->sleep = timeout + jiffies;
 	drive->dev_flags |= IDE_DFLAG_SLEEPING;
 }
-
 EXPORT_SYMBOL(ide_stall_queue);
 
-#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)
-
-/**
- *	choose_drive	-	select a drive to service
- *	@hwgroup: hardware group to select on
- *
- *	choose_drive() selects the next drive which will be serviced.
- *	This is necessary because the IDE layer can't issue commands
- *	to both drives on the same cable, unlike SCSI.
- */
-
-static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
-{
-	ide_drive_t *drive, *best;
-
-repeat:
-	best = NULL;
-	drive = hwgroup->drive;
-
-	/*
-	 * drive is doing pre-flush, ordered write, post-flush sequence. even
-	 * though that is 3 requests, it must be seen as a single transaction.
-	 * we must not preempt this drive until that is complete
-	 */
-	if (blk_queue_flushing(drive->queue)) {
-		/*
-		 * small race where queue could get replugged during
-		 * the 3-request flush cycle, just yank the plug since
-		 * we want it to finish asap
-		 */
-		blk_remove_plug(drive->queue);
-		return drive;
-	}
-
-	do {
-		u8 dev_s = !!(drive->dev_flags & IDE_DFLAG_SLEEPING);
-		u8 best_s = (best && !!(best->dev_flags & IDE_DFLAG_SLEEPING));
-
-		if ((dev_s == 0 || time_after_eq(jiffies, drive->sleep)) &&
-		    !elv_queue_empty(drive->queue)) {
-			if (best == NULL ||
-			    (dev_s && (best_s == 0 || time_before(drive->sleep, best->sleep))) ||
-			    (best_s == 0 && time_before(WAKEUP(drive), WAKEUP(best)))) {
-				if (!blk_queue_plugged(drive->queue))
-					best = drive;
-			}
-		}
-	} while ((drive = drive->next) != hwgroup->drive);
-
-	if (best && (best->dev_flags & IDE_DFLAG_NICE1) &&
-	    (best->dev_flags & IDE_DFLAG_SLEEPING) == 0 &&
-	    best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
-		long t = (signed long)(WAKEUP(best) - jiffies);
-		if (t >= WAIT_MIN_SLEEP) {
-			/*
-			 * We *may* have some time to spare, but first let's see if
-			 * someone can potentially benefit from our nice mood today..
-			 */
-			drive = best->next;
-			do {
-				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) == 0
-				 && time_before(jiffies - best->service_time, WAKEUP(drive))
-				 && time_before(WAKEUP(drive), jiffies + t))
-				{
-					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
-					goto repeat;
-				}
-			} while ((drive = drive->next) != best);
-		}
-	}
-	return best;
-}
-
 /*
  * Issue a new request to a drive from hwgroup
- * Caller must have already done spin_lock_irqsave(&hwgroup->lock, ..);
  *
  * A hwgroup is a serialized group of IDE interfaces. Usually there is
  * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
@@ -757,8 +682,7 @@ repeat:
  * possibly along with many other devices. This is especially common in
  * PCI-based systems with off-board IDE controller cards.
  *
- * The IDE driver uses a per-hwgroup spinlock to protect
- * access to the request queues, and to protect the hwgroup->busy flag.
+ * The IDE driver uses a per-hwgroup lock to protect the hwgroup->busy flag.
  *
  * The first thread into the driver for a particular hwgroup sets the
  * hwgroup->busy flag to indicate that this hwgroup is now active,
@@ -780,61 +704,38 @@ repeat:
  */
 void do_ide_request(struct request_queue *q)
 {
-	ide_drive_t *orig_drive = q->queuedata;
-	ide_hwgroup_t *hwgroup = orig_drive->hwif->hwgroup;
-	ide_drive_t *drive;
-	ide_hwif_t *hwif;
+	ide_drive_t *drive = q->queuedata;
+	ide_hwif_t *hwif = drive->hwif;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
 	struct request *rq;
 	ide_startstop_t startstop;
 
-	/* caller must own hwgroup->lock */
-	BUG_ON(!irqs_disabled());
-
-	while (!ide_lock_hwgroup(hwgroup)) {
-		drive = choose_drive(hwgroup);
-		if (drive == NULL) {
-			int sleeping = 0;
-			unsigned long sleep = 0; /* shut up, gcc */
-			hwgroup->rq = NULL;
-			drive = hwgroup->drive;
-			do {
-				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) &&
-				    (sleeping == 0 ||
-				     time_before(drive->sleep, sleep))) {
-					sleeping = 1;
-					sleep = drive->sleep;
-				}
-			} while ((drive = drive->next) != hwgroup->drive);
-			if (sleeping) {
-		/*
-		 * Take a short snooze, and then wake up this hwgroup again.
-		 * This gives other hwgroups on the same a chance to
-		 * play fairly with us, just in case there are big differences
-		 * in relative throughputs.. don't want to hog the cpu too much.
-		 */
-				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
-					sleep = jiffies + WAIT_MIN_SLEEP;
-#if 1
-				if (timer_pending(&hwgroup->timer))
-					printk(KERN_CRIT "ide_set_handler: timer already active\n");
-#endif
-				/* so that ide_timer_expiry knows what to do */
-				hwgroup->sleeping = 1;
-				hwgroup->req_gen_timer = hwgroup->req_gen;
-				mod_timer(&hwgroup->timer, sleep);
-				/* we purposely leave hwgroup locked
-				 * while sleeping */
-			} else
-				ide_unlock_hwgroup(hwgroup);
+	/*
+	 * drive is doing pre-flush, ordered write, post-flush sequence. even
+	 * though that is 3 requests, it must be seen as a single transaction.
+	 * we must not preempt this drive until that is complete
+	 */
+	if (blk_queue_flushing(q))
+		/*
+		 * small race where queue could get replugged during
+		 * the 3-request flush cycle, just yank the plug since
+		 * we want it to finish asap
+		 */
+		blk_remove_plug(q);
 
-			/* no more work for this hwgroup (for now) */
-			goto plug_device;
-		}
+	spin_unlock_irq(q->queue_lock);
+	spin_lock_irq(&hwgroup->lock);
 
-		if (drive != orig_drive)
-			goto plug_device;
+	if (!ide_lock_hwgroup(hwgroup)) {
+repeat:
+		hwgroup->rq = NULL;
 
-		hwif = drive->hwif;
+		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
+			if (time_before(drive->sleep, jiffies)) {
+				ide_unlock_hwgroup(hwgroup);
+				goto plug_device;
+			}
+		}
 
 		if (hwif != hwgroup->hwif) {
 			/*
@@ -847,16 +748,20 @@ void do_ide_request(struct request_queue *q)
 		hwgroup->hwif = hwif;
 		hwgroup->drive = drive;
 		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
-		drive->service_start = jiffies;
 
+		spin_unlock_irq(&hwgroup->lock);
+		spin_lock_irq(q->queue_lock);
 		/*
 		 * we know that the queue isn't empty, but this can happen
 		 * if the q->prep_rq_fn() decides to kill a request
 		 */
 		rq = elv_next_request(drive->queue);
+		spin_unlock_irq(q->queue_lock);
+		spin_lock_irq(&hwgroup->lock);
+
 		if (!rq) {
 			ide_unlock_hwgroup(hwgroup);
-			break;
+			goto out;
 		}
 
 		/*
@@ -886,17 +791,21 @@ void do_ide_request(struct request_queue *q)
 		startstop = start_request(drive, rq);
 		spin_lock_irq(&hwgroup->lock);
 
-		if (startstop == ide_stopped) {
-			ide_unlock_hwgroup(hwgroup);
-			if (!elv_queue_empty(orig_drive->queue))
-				blk_plug_device(orig_drive->queue);
-		}
-	}
+		if (startstop == ide_stopped)
+			goto repeat;
+	} else
+		goto plug_device;
+out:
+	spin_unlock_irq(&hwgroup->lock);
+	spin_lock_irq(q->queue_lock);
 	return;
 
 plug_device:
-	if (!elv_queue_empty(orig_drive->queue))
-		blk_plug_device(orig_drive->queue);
+	spin_unlock_irq(&hwgroup->lock);
+	spin_lock_irq(q->queue_lock);
+
+	if (!elv_queue_empty(q))
+		blk_plug_device(q);
 }
 
 /*
@@ -957,6 +866,17 @@ out:
 	return ret;
 }
 
+static void ide_plug_device(ide_drive_t *drive)
+{
+	struct request_queue *q = drive->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (!elv_queue_empty(q))
+		blk_plug_device(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
 /**
  *	ide_timer_expiry	-	handle lack of an IDE interrupt
  *	@data: timer callback magic (hwgroup)
@@ -974,10 +894,12 @@ out:
 void ide_timer_expiry (unsigned long data)
 {
 	ide_hwgroup_t	*hwgroup = (ide_hwgroup_t *) data;
+	ide_drive_t	*uninitialized_var(drive);
 	ide_handler_t	*handler;
 	ide_expiry_t	*expiry;
 	unsigned long	flags;
 	unsigned long	wait = -1;
+	int		plug_device = 0;
 
 	spin_lock_irqsave(&hwgroup->lock, flags);
 
@@ -989,12 +911,8 @@ void ide_timer_expiry (unsigned long data)
 		 * or we were "sleeping" to give other devices a chance.
 		 * Either way, we don't really want to complain about anything.
 		 */
-		if (hwgroup->sleeping) {
-			hwgroup->sleeping = 0;
-			ide_unlock_hwgroup(hwgroup);
-		}
 	} else {
-		ide_drive_t *drive = hwgroup->drive;
+		drive = hwgroup->drive;
 		if (!drive) {
 			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
 			hwgroup->handler = NULL;
@@ -1042,17 +960,18 @@ void ide_timer_expiry (unsigned long data)
 				ide_error(drive, "irq timeout",
 					  hwif->tp_ops->read_status(hwif));
 			}
-			drive->service_time = jiffies - drive->service_start;
 			spin_lock_irq(&hwgroup->lock);
 			enable_irq(hwif->irq);
 			if (startstop == ide_stopped) {
 				ide_unlock_hwgroup(hwgroup);
-				if (!elv_queue_empty(drive->queue))
-					blk_plug_device(drive->queue);
+				plug_device = 1;
 			}
 		}
 	}
 	spin_unlock_irqrestore(&hwgroup->lock, flags);
+
+	if (plug_device)
+		ide_plug_device(drive);
 }
 
 /**
@@ -1146,10 +1065,11 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
 	ide_hwif_t *hwif = hwgroup->hwif;
-	ide_drive_t *drive;
+	ide_drive_t *uninitialized_var(drive);
 	ide_handler_t *handler;
 	ide_startstop_t startstop;
 	irqreturn_t irq_ret = IRQ_NONE;
+	int plug_device = 0;
 
 	spin_lock_irqsave(&hwgroup->lock, flags);
 
@@ -1236,12 +1156,10 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	 * same irq as is currently being serviced here, and Linux
 	 * won't allow another of the same (on any CPU) until we return.
 	 */
-	drive->service_time = jiffies - drive->service_start;
 	if (startstop == ide_stopped) {
 		if (hwgroup->handler == NULL) {	/* paranoia */
 			ide_unlock_hwgroup(hwgroup);
-			if (!elv_queue_empty(drive->queue))
-				blk_plug_device(drive->queue);
+			plug_device = 1;
 		} else
 			printk(KERN_ERR "%s: %s: huh? expected NULL handler "
 					"on exit\n", __func__, drive->name);
@@ -1250,6 +1168,10 @@ out_handled:
 	irq_ret = IRQ_HANDLED;
 out:
 	spin_unlock_irqrestore(&hwgroup->lock, flags);
+
+	if (plug_device)
+		ide_plug_device(drive);
+
 	return irq_ret;
 }
 
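blk_plug_device() now needs the per-device queue lock rather than hwgroup->lock,
so ide_intr() and ide_timer_expiry() above only record the decision in a local
plug_device flag while holding hwgroup->lock and call the new ide_plug_device()
helper once that lock is dropped. A rough sketch of this deferred-plug pattern
(illustration only; example_complete is a hypothetical function, and
ide_plug_device() itself is file-local to ide-io.c):

#include <linux/ide.h>	/* 2.6.28-era IDE/block-layer interfaces assumed */

static void example_complete(ide_hwgroup_t *hwgroup, ide_startstop_t startstop)
{
	ide_drive_t *drive;
	unsigned long flags;
	int plug_device = 0;

	spin_lock_irqsave(&hwgroup->lock, flags);
	drive = hwgroup->drive;
	if (startstop == ide_stopped) {
		ide_unlock_hwgroup(hwgroup);	/* clear the ->busy flag */
		plug_device = 1;		/* note it, do not plug yet */
	}
	spin_unlock_irqrestore(&hwgroup->lock, flags);

	if (plug_device)			/* hwgroup->lock no longer held */
		ide_plug_device(drive);		/* takes q->queue_lock itself */
}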
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 44c6787f8aeb..678454ac2483 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -16,16 +16,19 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	spin_lock_irq(&hwgroup->lock);
 	if (drive->dev_flags & IDE_DFLAG_PARKED) {
 		int reset_timer = time_before(timeout, drive->sleep);
+		int start_queue = 0;
 
 		drive->sleep = timeout;
 		wake_up_all(&ide_park_wq);
-		if (reset_timer && hwgroup->sleeping &&
-		    del_timer(&hwgroup->timer)) {
-			hwgroup->sleeping = 0;
-			ide_unlock_hwgroup(hwgroup);
+		if (reset_timer && del_timer(&hwgroup->timer))
+			start_queue = 1;
+		spin_unlock_irq(&hwgroup->lock);
+
+		if (start_queue) {
+			spin_lock_irq(q->queue_lock);
 			blk_start_queueing(q);
+			spin_unlock_irq(q->queue_lock);
 		}
-		spin_unlock_irq(&hwgroup->lock);
 		return;
 	}
 	spin_unlock_irq(&hwgroup->lock);
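Since the 'sleeping' flag is gone, issue_park_cmd() now relies on del_timer()'s
return value to learn whether the sleep timer was still pending, and it restarts
the queue only after hwgroup->lock has been released, because blk_start_queueing()
runs under the queue's own lock. A small sketch of that ordering
(example_restart_queue is a made-up helper, not part of the patch):

#include <linux/ide.h>	/* 2.6.28-era IDE/block-layer interfaces assumed */

static void example_restart_queue(ide_drive_t *drive)
{
	struct request_queue *q = drive->queue;

	/* queue operations take the per-device lock, never hwgroup->lock */
	spin_lock_irq(q->queue_lock);
	blk_start_queueing(q);
	spin_unlock_irq(q->queue_lock);
}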
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index f9efd069edc2..966b74c15773 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -881,8 +881,7 @@ static int ide_init_queue(ide_drive_t *drive)
 	 *	do not.
 	 */
 
-	q = blk_init_queue_node(do_ide_request, &hwif->hwgroup->lock,
-				hwif_to_node(hwif));
+	q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
 	if (!q)
 		return 1;
 
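Passing a NULL lock to blk_init_queue_node() should make the block layer fall
back to the queue's own embedded spinlock, which is what turns the formerly
shared hwgroup request lock into a per-device one. A before/after sketch
(example_init_queue is a made-up wrapper, not the actual probe code):

#include <linux/ide.h>	/* 2.6.28-era IDE/block-layer interfaces assumed */

static struct request_queue *example_init_queue(ide_hwif_t *hwif)
{
	/*
	 * Before: every queue in the hwgroup shared hwgroup->lock, e.g.
	 *	blk_init_queue_node(do_ide_request, &hwif->hwgroup->lock,
	 *			    hwif_to_node(hwif));
	 * After: a NULL lock lets each queue use its own q->queue_lock.
	 */
	return blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
}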
diff --git a/include/linux/ide.h b/include/linux/ide.h
index f408d6123f14..5f86ad40ee7e 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -603,8 +603,6 @@ struct ide_drive_s {
 	unsigned long dev_flags;
 
 	unsigned long sleep;		/* sleep until this time */
-	unsigned long service_start;	/* time we started last request */
-	unsigned long service_time;	/* service time of last request */
 	unsigned long timeout;		/* max time to wait for irq */
 
 	special_t	special;	/* special action flags */
@@ -872,8 +870,6 @@ typedef struct hwgroup_s {
 
 		/* BOOL: protects all fields below */
 	volatile int busy;
-		/* BOOL: wake us up on timer expiry */
-	unsigned int sleeping	: 1;
 		/* BOOL: polling active & poll_timeout field valid */
 	unsigned int polling	: 1;
 