Diffstat (limited to 'drivers/ide/ide-io.c')
 drivers/ide/ide-io.c | 114 ++++++---------------------------------------------
 1 file changed, 18 insertions(+), 96 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 696525342e9a..28057747c1f8 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -358,31 +358,6 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 
 EXPORT_SYMBOL(ide_end_drive_cmd);
 
-/**
- * try_to_flush_leftover_data - flush junk
- * @drive: drive to flush
- *
- * try_to_flush_leftover_data() is invoked in response to a drive
- * unexpectedly having its DRQ_STAT bit set. As an alternative to
- * resetting the drive, this routine tries to clear the condition
- * by read a sector's worth of data from the drive. Of course,
- * this may not help if the drive is *waiting* for data from *us*.
- */
-static void try_to_flush_leftover_data (ide_drive_t *drive)
-{
-	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
-
-	if (drive->media != ide_disk)
-		return;
-	while (i > 0) {
-		u32 buffer[16];
-		u32 wcount = (i > 16) ? 16 : i;
-
-		i -= wcount;
-		drive->hwif->input_data(drive, NULL, buffer, wcount * 4);
-	}
-}
-
 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
 	if (rq->rq_disk) {
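
Both sides of this change drain the same number of bytes. The removed helper pulls (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS 32-bit words out of the data register; with SECTOR_WORDS being the number of 32-bit words in a 512-byte sector, that is exactly the nsect * SECTOR_SIZE bytes the next hunk hands to ide_pad_transfer(). A quick check of the arithmetic (constant values assumed from the kernel headers of this era):

	int nsect = drive->mult_count ? drive->mult_count : 1;

	/* old loop: words drained, times 4 bytes/word; SECTOR_WORDS assumed 512 / 4 = 128 */
	int old_bytes = nsect * SECTOR_WORDS * 4;	/* == nsect * 512 */

	/* new call: ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE) */
	int new_bytes = nsect * SECTOR_SIZE;		/* == nsect * 512, SECTOR_SIZE assumed 512 */
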
@@ -422,8 +397,11 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
 	}
 
 	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ &&
-	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0)
-		try_to_flush_leftover_data(drive);
+	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
+		int nsect = drive->mult_count ? drive->mult_count : 1;
+
+		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
+	}
 
 	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
 		ide_kill_rq(drive, rq);
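
ide_pad_transfer() is the generic drain/pad helper this call site now relies on; the hunk above only computes the byte count and delegates. As a rough sketch of what such a helper does, consistent with the call site but not necessarily the exact in-tree implementation:

	void ide_pad_transfer(ide_drive_t *drive, int write, int len)
	{
		ide_hwif_t *hwif = drive->hwif;
		u8 buf[256];

		memset(buf, 0, sizeof(buf));
		/* move data through the PIO methods in buffer-sized chunks,
		 * discarding reads and zero-filling writes */
		while (len > 0) {
			unsigned int nbytes = min_t(unsigned int, len, sizeof(buf));

			if (write)
				hwif->output_data(drive, NULL, buf, nbytes);
			else
				hwif->input_data(drive, NULL, buf, nbytes);
			len -= nbytes;
		}
	}

Unlike the removed try_to_flush_leftover_data(), a helper of this shape works for writes as well as reads and does not special-case disk media, which is why the IDE_HFLAG_ERROR_STOPS_FIFO check stays at the call site.
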
@@ -459,7 +437,7 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
 
 	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
 		/* force an abort */
-		hwif->OUTBSYNC(drive, WIN_IDLEIMMEDIATE,
+		hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE,
 			       hwif->io_ports.command_addr);
 
 	if (rq->errors >= ERROR_MAX) {
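
The only change in this hunk is the first argument of the ->OUTBSYNC method call: the hooked output routine is now keyed by the interface rather than the drive. Assuming the method pointer lives in ide_hwif_t as elsewhere in this tree, the prototype change looks roughly like:

	/* before */
	void (*OUTBSYNC)(ide_drive_t *drive, u8 value, unsigned long port);
	/* after */
	void (*OUTBSYNC)(ide_hwif_t *hwif, u8 value, unsigned long port);

Passing the hwif directly saves the callee a drive->hwif dereference and lets callers that only have the interface at hand use the method.
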
@@ -1539,88 +1517,30 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 }
 
 /**
- * ide_init_drive_cmd - initialize a drive command request
- * @rq: request object
- *
- * Initialize a request before we fill it in and send it down to
- * ide_do_drive_cmd. Commands must be set up by this function. Right
- * now it doesn't do a lot, but if that changes abusers will have a
- * nasty surprise.
- */
-
-void ide_init_drive_cmd (struct request *rq)
-{
-	blk_rq_init(NULL, rq);
-}
-
-EXPORT_SYMBOL(ide_init_drive_cmd);
-
-/**
  * ide_do_drive_cmd - issue IDE special command
  * @drive: device to issue command
  * @rq: request to issue
- * @action: action for processing
 *
  * This function issues a special IDE device request
  * onto the request queue.
  *
- * If action is ide_wait, then the rq is queued at the end of the
- * request queue, and the function sleeps until it has been processed.
- * This is for use when invoked from an ioctl handler.
- *
- * If action is ide_preempt, then the rq is queued at the head of
- * the request queue, displacing the currently-being-processed
- * request and this function returns immediately without waiting
- * for the new rq to be completed. This is VERY DANGEROUS, and is
- * intended for careful use by the ATAPI tape/cdrom driver code.
- *
- * If action is ide_end, then the rq is queued at the end of the
- * request queue, and the function returns immediately without waiting
- * for the new rq to be completed. This is again intended for careful
- * use by the ATAPI tape/cdrom driver code.
+ * the rq is queued at the head of the request queue, displacing
+ * the currently-being-processed request and this function
+ * returns immediately without waiting for the new rq to be
+ * completed. This is VERY DANGEROUS, and is intended for
+ * careful use by the ATAPI tape/cdrom driver code.
  */
 
-int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
+void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
-	DECLARE_COMPLETION_ONSTACK(wait);
-	int where = ELEVATOR_INSERT_BACK, err;
-	int must_wait = (action == ide_wait || action == ide_head_wait);
-
-	rq->errors = 0;
-
-	/*
-	 * we need to hold an extra reference to request for safe inspection
-	 * after completion
-	 */
-	if (must_wait) {
-		rq->ref_count++;
-		rq->end_io_data = &wait;
-		rq->end_io = blk_end_sync_rq;
-	}
 
 	spin_lock_irqsave(&ide_lock, flags);
-	if (action == ide_preempt)
-		hwgroup->rq = NULL;
-	if (action == ide_preempt || action == ide_head_wait) {
-		where = ELEVATOR_INSERT_FRONT;
-		rq->cmd_flags |= REQ_PREEMPT;
-	}
-	__elv_add_request(drive->queue, rq, where, 0);
-	ide_do_request(hwgroup, IDE_NO_IRQ);
+	hwgroup->rq = NULL;
+	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 1);
+	__generic_unplug_device(drive->queue);
 	spin_unlock_irqrestore(&ide_lock, flags);
-
-	err = 0;
-	if (must_wait) {
-		wait_for_completion(&wait);
-		if (rq->errors)
-			err = -EIO;
-
-		blk_put_request(rq);
-	}
-
-	return err;
 }
 
 EXPORT_SYMBOL(ide_do_drive_cmd);
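
With ide_init_drive_cmd() and the ide_action_t argument gone, ide_do_drive_cmd() keeps only the old ide_preempt semantics, and the __elv_add_request() plug parameter flips from 0 to 1 so __generic_unplug_device() can kick the queue in place of the direct ide_do_request() call. Callers that used ide_wait must now go through the block layer for synchronous execution. A hedged sketch of a converted caller (function name hypothetical; blk_get_request()/blk_execute_rq() assumed from the block API of this era):

	static int issue_drive_cmd_and_wait(ide_drive_t *drive)
	{
		struct request *rq;
		int err;

		/* the block layer initializes the request now; no ide_init_drive_cmd() */
		rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
		rq->cmd_type = REQ_TYPE_ATA_TASKFILE;

		/* queue at the tail and sleep until completion, as ide_wait once did */
		err = blk_execute_rq(drive->queue, NULL, rq, 0);

		blk_put_request(rq);
		return err;
	}
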
@@ -1637,6 +1557,8 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
 	task.tf.lbah = (bcount >> 8) & 0xff;
 
 	ide_tf_dump(drive->name, &task.tf);
+	ide_set_irq(drive, 1);
+	SELECT_MASK(drive, 0);
 	drive->hwif->tf_load(drive, &task);
 }
 
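
The two added lines hoist port setup into the helper: the device interrupt is re-enabled (nIEN cleared via ide_set_irq()) and any drive-select masking is dropped (SELECT_MASK(drive, 0)) before the taskfile registers are written. Assuming the ATAPI drivers previously did this themselves around each packet command, which this change would centralize, a caller reads roughly as:

	/* before: each caller prepared the port itself */
	ide_set_irq(drive, 1);
	SELECT_MASK(drive, 0);
	ide_pktcmd_tf_load(drive, tf_flags, bcount, dma);

	/* after: the helper does it, so a bare call suffices */
	ide_pktcmd_tf_load(drive, tf_flags, bcount, dma);
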