path: root/drivers/ide/ide-taskfile.c
Diffstat (limited to 'drivers/ide/ide-taskfile.c')
-rw-r--r--	drivers/ide/ide-taskfile.c	29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index ea345369553e..aeddbbd69e86 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -64,6 +64,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
 	ide_hwif_t *hwif = HWIF(drive);
 	struct ide_taskfile *tf = &task->tf;
 	ide_handler_t *handler = NULL;
+	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
 	const struct ide_dma_ops *dma_ops = hwif->dma_ops;
 
 	if (task->data_phase == TASKFILE_MULTI_IN ||
@@ -80,15 +81,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
 
 	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
 		ide_tf_dump(drive->name, tf);
-		hwif->set_irq(hwif, 1);
+		tp_ops->set_irq(hwif, 1);
 		SELECT_MASK(drive, 0);
-		hwif->tf_load(drive, task);
+		tp_ops->tf_load(drive, task);
 	}
 
 	switch (task->data_phase) {
 	case TASKFILE_MULTI_OUT:
 	case TASKFILE_OUT:
-		hwif->exec_command(hwif, tf->command);
+		tp_ops->exec_command(hwif, tf->command);
 		ndelay(400);	/* FIXME */
 		return pre_task_out_intr(drive, task->rq);
 	case TASKFILE_MULTI_IN:
@@ -125,7 +126,7 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile);
 static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	u8 stat = hwif->read_status(hwif);
+	u8 stat = hwif->tp_ops->read_status(hwif);
 
 	if (OK_STAT(stat, READY_STAT, BAD_STAT))
 		drive->mult_count = drive->mult_req;
@@ -146,8 +147,12 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
 	int retries = 5;
 	u8 stat;
 
-	while (((stat = hwif->read_status(hwif)) & BUSY_STAT) && retries--)
+	while (1) {
+		stat = hwif->tp_ops->read_status(hwif);
+		if ((stat & BUSY_STAT) == 0 || retries-- == 0)
+			break;
 		udelay(10);
+	};
 
 	if (OK_STAT(stat, READY_STAT, BAD_STAT))
 		return ide_stopped;
@@ -165,7 +170,7 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
 static ide_startstop_t recal_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	u8 stat = hwif->read_status(hwif);
+	u8 stat = hwif->tp_ops->read_status(hwif);
 
 	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
 		return ide_error(drive, "recal_intr", stat);
@@ -182,7 +187,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
 	u8 stat;
 
 	local_irq_enable_in_hardirq();
-	stat = hwif->read_status(hwif);
+	stat = hwif->tp_ops->read_status(hwif);
 
 	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
 		return ide_error(drive, "task_no_data_intr", stat);
@@ -205,7 +210,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
 	 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
 	 */
 	for (retries = 0; retries < 1000; retries++) {
-		stat = hwif->read_status(hwif);
+		stat = hwif->tp_ops->read_status(hwif);
 
 		if (stat & BUSY_STAT)
 			udelay(10);
@@ -260,9 +265,9 @@ static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
 
 	/* do the actual data transfer */
 	if (write)
-		hwif->output_data(drive, rq, buf, SECTOR_SIZE);
+		hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
 	else
-		hwif->input_data(drive, rq, buf, SECTOR_SIZE);
+		hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);
 
 	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
 #ifdef CONFIG_HIGHMEM
@@ -389,7 +394,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = hwif->hwgroup->rq;
-	u8 stat = hwif->read_status(hwif);
+	u8 stat = hwif->tp_ops->read_status(hwif);
 
 	/* Error? */
 	if (stat & ERR_STAT)
@@ -423,7 +428,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = HWGROUP(drive)->rq;
-	u8 stat = hwif->read_status(hwif);
+	u8 stat = hwif->tp_ops->read_status(hwif);
 
 	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
 		return task_error(drive, rq, __func__, stat);
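
For readers outside the IDE subsystem: the pattern this patch applies is ordinary ops-table indirection. The per-port transport callbacks (read_status, set_irq, tf_load, exec_command, input_data/output_data, ...) are gathered into one const struct ide_tp_ops and reached through a single hwif->tp_ops pointer instead of individual function pointers hanging off the hwif. The standalone sketch below only illustrates that shape under stated assumptions; the my_hwif, my_tp_ops, my_read_status and my_set_irq names are hypothetical and not part of the kernel tree.

/*
 * Minimal sketch of the ops-table indirection: callbacks grouped in a
 * const struct, reached through one pointer on the port object.
 * All names here are made up for illustration only.
 */
#include <stdio.h>

struct my_hwif;				/* forward declaration */

/* grouped transport callbacks, analogous in shape to struct ide_tp_ops */
struct my_tp_ops {
	unsigned char (*read_status)(struct my_hwif *hwif);
	void (*set_irq)(struct my_hwif *hwif, int on);
};

struct my_hwif {
	const struct my_tp_ops *tp_ops;	/* one pointer to the whole group */
};

static unsigned char my_read_status(struct my_hwif *hwif)
{
	(void)hwif;
	return 0x50;			/* pretend the drive reports READY */
}

static void my_set_irq(struct my_hwif *hwif, int on)
{
	(void)hwif;
	printf("irq %s\n", on ? "enabled" : "masked");
}

static const struct my_tp_ops default_tp_ops = {
	.read_status	= my_read_status,
	.set_irq	= my_set_irq,
};

int main(void)
{
	struct my_hwif hwif = { .tp_ops = &default_tp_ops };

	/* callers go through the ops table, as the patch does */
	hwif.tp_ops->set_irq(&hwif, 1);
	printf("status = 0x%02x\n", hwif.tp_ops->read_status(&hwif));
	return 0;
}

The practical benefit of grouping the callbacks this way is that a host driver assigns one table pointer at init time rather than many individual function pointers, and the table itself can be const and shared between ports.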