author	Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>	2009-01-06 11:20:50 -0500
committer	Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>	2009-01-06 11:20:50 -0500
commit	b65fac32cfe3b2f98cd472fef400bd1c1340de23 (patch)
tree	493a7e30e23e5413a9e5ad6102b8e91ebc02c069 /drivers/ide/ide-io.c
parent	5b31f855f10d0053e738baa6d91fb6a3fad35119 (diff)
ide: merge ide_hwgroup_t with ide_hwif_t (v2)
* Merge ide_hwgroup_t with ide_hwif_t.
* Cleanup init_irq() accordingly, then remove no longer needed
  ide_remove_port_from_hwgroup() and ide_ports[].
* Remove now unused HWGROUP() macro.

While at it:

* ide_dump_ata_error() fixups

v2:
* Fix ->quirk_list check in do_ide_request()
  (s/hwif->cur_dev/prev_port->cur_dev).

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
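For orientation while reading the diff: the per-port state that used to hang off a separate ide_hwgroup_t (the current request, current device, IRQ handler and expiry hook, timeout timer, request-generation counters, polling flag, and the lock protecting them) now lives directly in ide_hwif_t, so the code paths below switch from hwgroup->field to hwif->field accesses. The following is only an illustrative sketch of that layout, assuming the field names visible in this diff; the exact types and the full structure definition live in include/linux/ide.h, not here.

/*
 * Sketch only -- not the real ide_hwif_t from include/linux/ide.h.
 * It collects the fields this patch makes the I/O path reference as
 * hwif-><field> instead of hwgroup-><field>; types are illustrative.
 */
typedef struct hwif_s {
	/* ... existing per-port members (I/O ports, irq, tp_ops, ...) ... */

	spinlock_t	lock;		/* was hwgroup->lock */

	ide_drive_t	*cur_dev;	/* device currently active on this port */
	struct request	*rq;		/* request currently being processed */

	ide_handler_t	*handler;	/* handler for the expected IRQ */
	ide_expiry_t	*expiry;	/* timeout hook for the pending command */
	struct timer_list timer;	/* command timeout timer */
	int		req_gen;	/* request generation counters used to */
	int		req_gen_timer;	/*   detect stale timer expiries */
	int		polling;	/* polled (no-IRQ) operation in progress */
} ide_hwif_t;

With the hwgroup structure gone, a port's lock, current request and handler travel with the hwif itself, which is what allows init_irq() to be cleaned up and ide_remove_port_from_hwgroup() and ide_ports[] to be dropped, as noted above.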
Diffstat (limited to 'drivers/ide/ide-io.c')
-rw-r--r--	drivers/ide/ide-io.c	117
1 files changed, 58 insertions, 59 deletions
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8f371821c614..6ff82d7055b9 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -88,7 +88,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 		ret = 0;
 
 	if (ret == 0 && dequeue)
-		drive->hwif->hwgroup->rq = NULL;
+		drive->hwif->rq = NULL;
 
 	return ret;
 }
@@ -107,7 +107,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
 	unsigned int nr_bytes = nr_sectors << 9;
-	struct request *rq = drive->hwif->hwgroup->rq;
+	struct request *rq = drive->hwif->rq;
 
 	if (!nr_bytes) {
 		if (blk_pc_request(rq))
@@ -160,8 +160,8 @@ EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
 
 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 {
-	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
-	struct request *rq = hwgroup->rq;
+	ide_hwif_t *hwif = drive->hwif;
+	struct request *rq = hwif->rq;
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *task = (ide_task_t *)rq->special;
@@ -186,7 +186,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 		return;
 	}
 
-	hwgroup->rq = NULL;
+	hwif->rq = NULL;
 
 	rq->errors = err;
 
@@ -321,7 +321,8 @@ ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
 
 	err = ide_dump_status(drive, msg, stat);
 
-	if ((rq = HWGROUP(drive)->rq) == NULL)
+	rq = drive->hwif->rq;
+	if (rq == NULL)
 		return ide_stopped;
 
 	/* retry only "normal" I/O: */
@@ -654,7 +655,7 @@ kill_rq:
  * @timeout: time to stall for (jiffies)
  *
  * ide_stall_queue() can be used by a drive to give excess bandwidth back
- * to the hwgroup by sleeping for timeout jiffies.
+ * to the port by sleeping for timeout jiffies.
  */
 
 void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
@@ -705,14 +706,13 @@ static inline void ide_unlock_host(struct ide_host *host)
 }
 
 /*
- * Issue a new request to a drive from hwgroup
+ * Issue a new request to a device.
  */
 void do_ide_request(struct request_queue *q)
 {
 	ide_drive_t *drive = q->queuedata;
 	ide_hwif_t *hwif = drive->hwif;
 	struct ide_host *host = hwif->host;
-	ide_hwgroup_t *hwgroup = hwif->hwgroup;
 	struct request *rq = NULL;
 	ide_startstop_t startstop;
 
@@ -734,13 +734,13 @@ void do_ide_request(struct request_queue *q)
 	if (ide_lock_host(host, hwif))
 		goto plug_device_2;
 
-	spin_lock_irq(&hwgroup->lock);
+	spin_lock_irq(&hwif->lock);
 
 	if (!ide_lock_port(hwif)) {
 		ide_hwif_t *prev_port;
 repeat:
 		prev_port = hwif->host->cur_port;
-		hwgroup->rq = NULL;
+		hwif->rq = NULL;
 
 		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
 			if (time_before(drive->sleep, jiffies)) {
@@ -755,15 +755,15 @@ repeat:
 			 * set nIEN for previous port, drives in the
 			 * quirk_list may not like intr setups/cleanups
 			 */
-			if (prev_port && hwgroup->cur_dev->quirk_list == 0)
+			if (prev_port && prev_port->cur_dev->quirk_list == 0)
 				prev_port->tp_ops->set_irq(prev_port, 0);
 
 			hwif->host->cur_port = hwif;
 		}
-		hwgroup->cur_dev = drive;
+		hwif->cur_dev = drive;
 		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
-		spin_unlock_irq(&hwgroup->lock);
+		spin_unlock_irq(&hwif->lock);
 		spin_lock_irq(q->queue_lock);
 		/*
 		 * we know that the queue isn't empty, but this can happen
@@ -771,7 +771,7 @@ repeat:
 		 */
 		rq = elv_next_request(drive->queue);
 		spin_unlock_irq(q->queue_lock);
-		spin_lock_irq(&hwgroup->lock);
+		spin_lock_irq(&hwif->lock);
 
 		if (!rq) {
 			ide_unlock_port(hwif);
@@ -799,25 +799,25 @@ repeat:
 			goto plug_device;
 		}
 
-		hwgroup->rq = rq;
+		hwif->rq = rq;
 
-		spin_unlock_irq(&hwgroup->lock);
+		spin_unlock_irq(&hwif->lock);
 		startstop = start_request(drive, rq);
-		spin_lock_irq(&hwgroup->lock);
+		spin_lock_irq(&hwif->lock);
 
 		if (startstop == ide_stopped)
 			goto repeat;
 	} else
 		goto plug_device;
 out:
-	spin_unlock_irq(&hwgroup->lock);
+	spin_unlock_irq(&hwif->lock);
 	if (rq == NULL)
 		ide_unlock_host(host);
 	spin_lock_irq(q->queue_lock);
 	return;
 
 plug_device:
-	spin_unlock_irq(&hwgroup->lock);
+	spin_unlock_irq(&hwif->lock);
 	ide_unlock_host(host);
 plug_device_2:
 	spin_lock_irq(q->queue_lock);
@@ -827,7 +827,7 @@ plug_device_2:
 }
 
 /*
- * un-busy the hwgroup etc, and clear any pending DMA status. we want to
+ * un-busy the port etc, and clear any pending DMA status. we want to
  * retry the current request in pio mode instead of risking tossing it
  * all away
  */
@@ -864,12 +864,11 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 	 * un-busy drive etc and make sure request is sane
 	 */
 
-	rq = HWGROUP(drive)->rq;
-
+	rq = hwif->rq;
 	if (!rq)
 		goto out;
 
-	HWGROUP(drive)->rq = NULL;
+	hwif->rq = NULL;
 
 	rq->errors = 0;
 
@@ -897,7 +896,7 @@ static void ide_plug_device(ide_drive_t *drive)
 
 /**
  * ide_timer_expiry - handle lack of an IDE interrupt
- * @data: timer callback magic (hwgroup)
+ * @data: timer callback magic (hwif)
  *
  * An IDE command has timed out before the expected drive return
  * occurred. At this point we attempt to clean up the current
@@ -911,19 +910,18 @@ static void ide_plug_device(ide_drive_t *drive)
 
 void ide_timer_expiry (unsigned long data)
 {
-	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
-	ide_hwif_t *uninitialized_var(hwif);
+	ide_hwif_t *hwif = (ide_hwif_t *)data;
 	ide_drive_t *uninitialized_var(drive);
 	ide_handler_t *handler;
-	ide_expiry_t *expiry;
 	unsigned long flags;
 	unsigned long wait = -1;
 	int plug_device = 0;
 
-	spin_lock_irqsave(&hwgroup->lock, flags);
+	spin_lock_irqsave(&hwif->lock, flags);
+
+	handler = hwif->handler;
 
-	if (((handler = hwgroup->handler) == NULL) ||
-	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
+	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
 		/*
 		 * Either a marginal timeout occurred
 		 * (got the interrupt just as timer expired),
@@ -931,38 +929,39 @@ void ide_timer_expiry (unsigned long data)
 		 * Either way, we don't really want to complain about anything.
 		 */
 	} else {
-		drive = hwgroup->cur_dev;
+		drive = hwif->cur_dev;
 		if (!drive) {
 			printk(KERN_ERR "%s: ->cur_dev was NULL\n", __func__);
-			hwgroup->handler = NULL;
+			hwif->handler = NULL;
 		} else {
+			ide_expiry_t *expiry = hwif->expiry;
 			ide_startstop_t startstop = ide_stopped;
 
-			if ((expiry = hwgroup->expiry) != NULL) {
+			if (expiry) {
 				/* continue */
 				if ((wait = expiry(drive)) > 0) {
 					/* reset timer */
-					hwgroup->timer.expires = jiffies + wait;
-					hwgroup->req_gen_timer = hwgroup->req_gen;
-					add_timer(&hwgroup->timer);
-					spin_unlock_irqrestore(&hwgroup->lock, flags);
+					hwif->timer.expires = jiffies + wait;
+					hwif->req_gen_timer = hwif->req_gen;
+					add_timer(&hwif->timer);
+					spin_unlock_irqrestore(&hwif->lock, flags);
 					return;
 				}
 			}
-			hwgroup->handler = NULL;
+			hwif->handler = NULL;
 			/*
 			 * We need to simulate a real interrupt when invoking
 			 * the handler() function, which means we need to
 			 * globally mask the specific IRQ:
 			 */
-			spin_unlock(&hwgroup->lock);
+			spin_unlock(&hwif->lock);
 			hwif = HWIF(drive);
 			/* disable_irq_nosync ?? */
 			disable_irq(hwif->irq);
 			/* local CPU only,
 			 * as if we were handling an interrupt */
 			local_irq_disable();
-			if (hwgroup->polling) {
+			if (hwif->polling) {
 				startstop = handler(drive);
 			} else if (drive_is_ready(drive)) {
 				if (drive->waiting_for_dma)
@@ -978,7 +977,7 @@ void ide_timer_expiry (unsigned long data)
 				ide_error(drive, "irq timeout",
 					  hwif->tp_ops->read_status(hwif));
 			}
-			spin_lock_irq(&hwgroup->lock);
+			spin_lock_irq(&hwif->lock);
 			enable_irq(hwif->irq);
 			if (startstop == ide_stopped) {
 				ide_unlock_port(hwif);
@@ -986,7 +985,7 @@ void ide_timer_expiry (unsigned long data)
 			}
 		}
 	}
-	spin_unlock_irqrestore(&hwgroup->lock, flags);
+	spin_unlock_irqrestore(&hwif->lock, flags);
 
 	if (plug_device) {
 		ide_unlock_host(hwif->host);
@@ -1052,7 +1051,7 @@ static void unexpected_intr(int irq, ide_hwif_t *hwif)
  * places
  *
  * hwif is the interface in the group currently performing
- * a command. hwgroup->cur_dev is the drive and hwgroup->handler is
+ * a command. hwif->cur_dev is the drive and hwif->handler is
  * the IRQ handler to call. As we issue a command the handlers
  * step through multiple states, reassigning the handler to the
  * next step in the process. Unlike a smart SCSI controller IDE
@@ -1063,13 +1062,12 @@ static void unexpected_intr(int irq, ide_hwif_t *hwif)
  *
  * The handler eventually returns ide_stopped to indicate the
  * request completed. At this point we issue the next request
- * on the hwgroup and the process begins again.
+ * on the port and the process begins again.
  */
 
 irqreturn_t ide_intr (int irq, void *dev_id)
 {
 	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
-	ide_hwgroup_t *hwgroup = hwif->hwgroup;
 	ide_drive_t *uninitialized_var(drive);
 	ide_handler_t *handler;
 	unsigned long flags;
@@ -1082,12 +1080,14 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		goto out_early;
 	}
 
-	spin_lock_irqsave(&hwgroup->lock, flags);
+	spin_lock_irqsave(&hwif->lock, flags);
 
 	if (!ide_ack_intr(hwif))
 		goto out;
 
-	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
+	handler = hwif->handler;
+
+	if (handler == NULL || hwif->polling) {
 		/*
 		 * Not expecting an interrupt from this drive.
 		 * That means this could be:
@@ -1124,7 +1124,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		goto out;
 	}
 
-	drive = hwgroup->cur_dev;
+	drive = hwif->cur_dev;
 	if (!drive) {
 		/*
 		 * This should NEVER happen, and there isn't much
@@ -1145,10 +1145,10 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 */
 		goto out;
 
-	hwgroup->handler = NULL;
-	hwgroup->req_gen++;
-	del_timer(&hwgroup->timer);
-	spin_unlock(&hwgroup->lock);
+	hwif->handler = NULL;
+	hwif->req_gen++;
+	del_timer(&hwif->timer);
+	spin_unlock(&hwif->lock);
 
 	if (hwif->port_ops && hwif->port_ops->clear_irq)
 		hwif->port_ops->clear_irq(drive);
@@ -1159,7 +1159,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	/* service this interrupt, may set handler for next interrupt */
 	startstop = handler(drive);
 
-	spin_lock_irq(&hwgroup->lock);
+	spin_lock_irq(&hwif->lock);
 	/*
 	 * Note that handler() may have set things up for another
 	 * interrupt to occur soon, but it cannot happen until
@@ -1168,7 +1168,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	 * won't allow another of the same (on any CPU) until we return.
 	 */
 	if (startstop == ide_stopped) {
-		if (hwgroup->handler == NULL) {	/* paranoia */
+		if (hwif->handler == NULL) {	/* paranoia */
 			ide_unlock_port(hwif);
 			plug_device = 1;
 		} else
@@ -1178,7 +1178,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 out_handled:
 	irq_ret = IRQ_HANDLED;
 out:
-	spin_unlock_irqrestore(&hwgroup->lock, flags);
+	spin_unlock_irqrestore(&hwif->lock, flags);
 out_early:
 	if (plug_device) {
 		ide_unlock_host(hwif->host);
@@ -1205,11 +1205,10 @@ out_early:
 
 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
 	struct request_queue *q = drive->queue;
 	unsigned long flags;
 
-	hwgroup->rq = NULL;
+	drive->hwif->rq = NULL;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);