Diffstat (limited to 'drivers')
-rw-r--r--	drivers/ata/libata-scsi.c | 2
-rw-r--r--	drivers/block/DAC960.c | 10
-rw-r--r--	drivers/block/Kconfig | 2
-rw-r--r--	drivers/block/amiflop.c | 54
-rw-r--r--	drivers/block/ataflop.c | 66
-rw-r--r--	drivers/block/cciss.c | 39
-rw-r--r--	drivers/block/cpqarray.c | 16
-rw-r--r--	drivers/block/floppy.c | 58
-rw-r--r--	drivers/block/hd.c | 104
-rw-r--r--	drivers/block/loop.c | 37
-rw-r--r--	drivers/block/mg_disk.c | 535
-rw-r--r--	drivers/block/nbd.c | 23
-rw-r--r--	drivers/block/paride/pcd.c | 29
-rw-r--r--	drivers/block/paride/pd.c | 22
-rw-r--r--	drivers/block/paride/pf.c | 47
-rw-r--r--	drivers/block/ps3disk.c | 22
-rw-r--r--	drivers/block/sunvdc.c | 14
-rw-r--r--	drivers/block/swim.c | 48
-rw-r--r--	drivers/block/swim3.c | 107
-rw-r--r--	drivers/block/sx8.c | 17
-rw-r--r--	drivers/block/ub.c | 48
-rw-r--r--	drivers/block/viodasd.c | 12
-rw-r--r--	drivers/block/virtio_blk.c | 75
-rw-r--r--	drivers/block/xd.c | 41
-rw-r--r--	drivers/block/xen-blkfront.c | 32
-rw-r--r--	drivers/block/xsysace.c | 44
-rw-r--r--	drivers/block/z2ram.c | 19
-rw-r--r--	drivers/cdrom/gdrom.c | 34
-rw-r--r--	drivers/cdrom/viocd.c | 29
-rw-r--r--	drivers/ide/ide-atapi.c | 177
-rw-r--r--	drivers/ide/ide-cd.c | 140
-rw-r--r--	drivers/ide/ide-cd.h | 4
-rw-r--r--	drivers/ide/ide-disk.c | 9
-rw-r--r--	drivers/ide/ide-dma.c | 22
-rw-r--r--	drivers/ide/ide-floppy.c | 32
-rw-r--r--	drivers/ide/ide-io.c | 57
-rw-r--r--	drivers/ide/ide-ioctls.c | 1
-rw-r--r--	drivers/ide/ide-lib.c | 2
-rw-r--r--	drivers/ide/ide-park.c | 7
-rw-r--r--	drivers/ide/ide-pm.c | 38
-rw-r--r--	drivers/ide/ide-tape.c | 735
-rw-r--r--	drivers/ide/ide-taskfile.c | 20
-rw-r--r--	drivers/ide/pdc202xx_old.c | 2
-rw-r--r--	drivers/ide/tc86c001.c | 2
-rw-r--r--	drivers/ide/tx4939ide.c | 2
-rw-r--r--	drivers/memstick/core/mspro_block.c | 17
-rw-r--r--	drivers/message/fusion/mptsas.c | 22
-rw-r--r--	drivers/message/i2o/i2o_block.c | 38
-rw-r--r--	drivers/mmc/card/block.c | 10
-rw-r--r--	drivers/mmc/card/queue.c | 11
-rw-r--r--	drivers/mtd/mtd_blkdevs.c | 41
-rw-r--r--	drivers/s390/block/dasd.c | 35
-rw-r--r--	drivers/s390/block/dasd_diag.c | 5
-rw-r--r--	drivers/s390/block/dasd_eckd.c | 6
-rw-r--r--	drivers/s390/block/dasd_fba.c | 7
-rw-r--r--	drivers/s390/char/tape_34xx.c | 2
-rw-r--r--	drivers/s390/char/tape_3590.c | 2
-rw-r--r--	drivers/s390/char/tape_block.c | 24
-rw-r--r--	drivers/sbus/char/jsflash.c | 26
-rw-r--r--	drivers/scsi/eata.c | 24
-rw-r--r--	drivers/scsi/libsas/sas_expander.c | 16
-rw-r--r--	drivers/scsi/libsas/sas_host_smp.c | 49
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c | 22
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_transport.c | 23
-rw-r--r--	drivers/scsi/osd/osd_initiator.c | 72
-rw-r--r--	drivers/scsi/scsi_lib.c | 87
-rw-r--r--	drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r--	drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--	drivers/scsi/sd.c | 24
-rw-r--r--	drivers/scsi/sd_dif.c | 2
-rw-r--r--	drivers/scsi/sg.c | 2
-rw-r--r--	drivers/scsi/sr.c | 15
-rw-r--r--	drivers/scsi/st.c | 2
-rw-r--r--	drivers/scsi/u14-34f.c | 22
74 files changed, 1500 insertions, 1948 deletions
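
Every conversion in this series has the same shape: drivers stop poking struct request fields (sector, nr_sectors, current_nr_sectors, data_len) directly and go through the block-layer accessors and completion helpers instead. A minimal sketch of a strategy function written against the new API, for orientation only — mydev_xfer() and mydev_capacity are hypothetical placeholders, not part of this patch:

static void mydev_request(struct request_queue *q)
{
	/* blk_fetch_request() = blk_peek_request() + blk_start_request() */
	struct request *rq = blk_fetch_request(q);

	while (rq) {
		int err = 0;

		if (blk_rq_pos(rq) + blk_rq_sectors(rq) > mydev_capacity)
			err = -EIO;	/* access past end of device */
		else
			err = mydev_xfer(rq->buffer, blk_rq_pos(rq),
					 blk_rq_cur_sectors(rq));

		/* complete the current chunk; true means chunks remain */
		if (!__blk_end_request_cur(rq, err))
			rq = blk_fetch_request(q);
	}
}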
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 342316064e9f..d0dfeef55db5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
 	if (likely(!blk_pc_request(rq)))
 		return 0;
 
-	if (!rq->data_len || (rq->cmd_flags & REQ_RW))
+	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
 		return 0;
 
 	return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f22ed6cc69f2..668dc234b8e2 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
 	DAC960_Command_T *Command;
 
 	while(1) {
-		Request = elv_next_request(req_q);
+		Request = blk_peek_request(req_q);
 		if (!Request)
 			return 1;
 
@@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
 	}
 	Command->Completion = Request->end_io_data;
 	Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
-	Command->BlockNumber = Request->sector;
-	Command->BlockCount = Request->nr_sectors;
+	Command->BlockNumber = blk_rq_pos(Request);
+	Command->BlockCount = blk_rq_sectors(Request);
 	Command->Request = Request;
-	blkdev_dequeue_request(Request);
+	blk_start_request(Request);
 	Command->SegmentCount = blk_rq_map_sg(req_q,
 		  Command->Request, Command->cmd_sglist);
 	/* pci_map_sg MAY change the value of SegCount */
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
 	 * successfully as possible.
 	 */
 	Command->SegmentCount = 1;
-	Command->BlockNumber = Request->sector;
+	Command->BlockNumber = blk_rq_pos(Request);
 	Command->BlockCount = 1;
 	DAC960_QueueReadWriteCommand(Command);
 	return;
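
DAC960 keeps the two-step form instead of blk_fetch_request(): blk_peek_request() inspects the queue head without dequeueing, and blk_start_request() dequeues once a command has been allocated, replacing the old elv_next_request()/blkdev_dequeue_request() pair. The same shape reduced to a sketch — my_cmd, my_cmd_alloc() and my_issue() are hypothetical driver internals:

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		struct my_cmd *cmd = my_cmd_alloc();

		if (!cmd)
			return;		/* head stays queued; retried later */

		blk_start_request(rq);	/* dequeue only once resources exist */
		cmd->lba = blk_rq_pos(rq);
		cmd->nsect = blk_rq_sectors(rq);
		my_issue(cmd, rq);
	}
}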
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ddea8e485cc9..f42fa50d3550 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -412,7 +412,7 @@ config ATA_OVER_ETH
 
 config MG_DISK
 	tristate "mGine mflash, gflash support"
-	depends on ARM && ATA && GPIOLIB
+	depends on ARM && GPIOLIB
 	help
 	  mGine mFlash(gFlash) block device driver
 
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8df436ff7068..9c6e5b0fe894 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
 MODULE_LICENSE("GPL");
 
 static struct request_queue *floppy_queue;
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
 
 /*
  * Macros
@@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
 
 static void redo_fd_request(void)
 {
+	struct request *rq;
 	unsigned int cnt, block, track, sector;
 	int drive;
 	struct amiga_floppy_struct *floppy;
 	char *data;
 	unsigned long flags;
+	int err;
 
- repeat:
-	if (!CURRENT) {
+next_req:
+	rq = blk_fetch_request(floppy_queue);
+	if (!rq) {
 		/* Nothing left to do */
 		return;
 	}
 
-	floppy = CURRENT->rq_disk->private_data;
+	floppy = rq->rq_disk->private_data;
 	drive = floppy - unit;
 
+next_segment:
 	/* Here someone could investigate to be more efficient */
-	for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
+	for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
 #ifdef DEBUG
 		printk("fd: sector %ld + %d requested for %s\n",
-		       CURRENT->sector,cnt,
-		       (rq_data_dir(CURRENT) == READ) ? "read" : "write");
+		       blk_rq_pos(rq), cnt,
+		       (rq_data_dir(rq) == READ) ? "read" : "write");
 #endif
-		block = CURRENT->sector + cnt;
+		block = blk_rq_pos(rq) + cnt;
 		if ((int)block > floppy->blocks) {
-			end_request(CURRENT, 0);
-			goto repeat;
+			err = -EIO;
+			break;
 		}
 
 		track = block / (floppy->dtype->sects * floppy->type->sect_mult);
 		sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
-		data = CURRENT->buffer + 512 * cnt;
+		data = rq->buffer + 512 * cnt;
 #ifdef DEBUG
 		printk("access to track %d, sector %d, with buffer at "
 		       "0x%08lx\n", track, sector, data);
 #endif
 
-		if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
-			printk(KERN_WARNING "do_fd_request: unknown command\n");
-			end_request(CURRENT, 0);
-			goto repeat;
-		}
 		if (get_track(drive, track) == -1) {
-			end_request(CURRENT, 0);
-			goto repeat;
+			err = -EIO;
+			break;
 		}
 
-		switch (rq_data_dir(CURRENT)) {
-		case READ:
+		if (rq_data_dir(rq) == READ) {
 			memcpy(data, floppy->trackbuf + sector * 512, 512);
-			break;
-
-		case WRITE:
+		} else {
 			memcpy(floppy->trackbuf + sector * 512, data, 512);
 
 			/* keep the drive spinning while writes are scheduled */
 			if (!fd_motor_on(drive)) {
-				end_request(CURRENT, 0);
-				goto repeat;
+				err = -EIO;
+				break;
 			}
 			/*
 			 * setup a callback to write the track buffer
@@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
 			/* reset the timer */
 			mod_timer (flush_track_timer + drive, jiffies + 1);
 			local_irq_restore(flags);
-			break;
 		}
 	}
-	CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
-	CURRENT->sector += CURRENT->current_nr_sectors;
 
-	end_request(CURRENT, 1);
-	goto repeat;
+	if (__blk_end_request_cur(rq, err))
+		goto next_segment;
+	goto next_req;
 }
 
 static void do_fd_request(struct request_queue * q)
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4234c11c1e4c..f5e7180d7f47 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -79,9 +79,7 @@
 #undef DEBUG
 
 static struct request_queue *floppy_queue;
-
-#define QUEUE (floppy_queue)
-#define CURRENT elv_next_request(floppy_queue)
+static struct request *fd_request;
 
 /* Disk types: DD, HD, ED */
 static struct atari_disk_type {
@@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
 static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
 static DEFINE_TIMER(fd_timer, check_change, 0, 0);
 
+static void fd_end_request_cur(int err)
+{
+	if (!__blk_end_request_cur(fd_request, err))
+		fd_request = NULL;
+}
+
 static inline void start_motor_off_timer(void)
 {
 	mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@@ -606,15 +610,15 @@ static void fd_error( void )
 		return;
 	}
 
-	if (!CURRENT)
+	if (!fd_request)
 		return;
 
-	CURRENT->errors++;
-	if (CURRENT->errors >= MAX_ERRORS) {
+	fd_request->errors++;
+	if (fd_request->errors >= MAX_ERRORS) {
 		printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
-		end_request(CURRENT, 0);
+		fd_end_request_cur(-EIO);
 	}
-	else if (CURRENT->errors == RECALIBRATE_ERRORS) {
+	else if (fd_request->errors == RECALIBRATE_ERRORS) {
 		printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
 		if (SelectedDrive != -1)
 			SUD.track = -1;
@@ -725,16 +729,14 @@ static void do_fd_action( int drive )
 	if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
 		if (ReqCmd == READ) {
 			copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
-			if (++ReqCnt < CURRENT->current_nr_sectors) {
+			if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
 				/* read next sector */
 				setup_req_params( drive );
 				goto repeat;
 			}
 			else {
 				/* all sectors finished */
-				CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
-				CURRENT->sector += CURRENT->current_nr_sectors;
-				end_request(CURRENT, 1);
+				fd_end_request_cur(0);
 				redo_fd_request();
 				return;
 			}
@@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
 		}
 	}
 
-	if (++ReqCnt < CURRENT->current_nr_sectors) {
+	if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
 		/* read next sector */
 		setup_req_params( SelectedDrive );
 		do_fd_action( SelectedDrive );
 	}
 	else {
 		/* all sectors finished */
-		CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
-		CURRENT->sector += CURRENT->current_nr_sectors;
-		end_request(CURRENT, 1);
+		fd_end_request_cur(0);
 		redo_fd_request();
 	}
 	return;
@@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
 	ReqData = ReqBuffer + 512 * ReqCnt;
 
 	if (UseTrackbuffer)
-		read_track = (ReqCmd == READ && CURRENT->errors == 0);
+		read_track = (ReqCmd == READ && fd_request->errors == 0);
 	else
 		read_track = 0;
 
@@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
 	int drive, type;
 	struct atari_floppy_struct *floppy;
 
-	DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
-		CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
-		CURRENT ? CURRENT->sector : 0 ));
+	DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
+		fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
+		fd_request ? blk_rq_pos(fd_request) : 0 ));
 
 	IsFormatting = 0;
 
 repeat:
+	if (!fd_request) {
+		fd_request = blk_fetch_request(floppy_queue);
+		if (!fd_request)
+			goto the_end;
+	}
 
-	if (!CURRENT)
-		goto the_end;
-
-	floppy = CURRENT->rq_disk->private_data;
+	floppy = fd_request->rq_disk->private_data;
 	drive = floppy - unit;
 	type = floppy->type;
 
 	if (!UD.connected) {
 		/* drive not connected */
 		printk(KERN_ERR "Unknown Device: fd%d\n", drive );
-		end_request(CURRENT, 0);
+		fd_end_request_cur(-EIO);
 		goto repeat;
 	}
 
@@ -1430,12 +1432,12 @@ repeat:
 		/* user supplied disk type */
 		if (--type >= NUM_DISK_MINORS) {
 			printk(KERN_WARNING "fd%d: invalid disk format", drive );
-			end_request(CURRENT, 0);
+			fd_end_request_cur(-EIO);
 			goto repeat;
 		}
 		if (minor2disktype[type].drive_types > DriveType) {
 			printk(KERN_WARNING "fd%d: unsupported disk format", drive );
-			end_request(CURRENT, 0);
+			fd_end_request_cur(-EIO);
 			goto repeat;
 		}
 		type = minor2disktype[type].index;
@@ -1444,8 +1446,8 @@ repeat:
 		UD.autoprobe = 0;
 	}
 
-	if (CURRENT->sector + 1 > UDT->blocks) {
-		end_request(CURRENT, 0);
+	if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
+		fd_end_request_cur(-EIO);
 		goto repeat;
 	}
 
@@ -1453,9 +1455,9 @@ repeat:
 	del_timer( &motor_off_timer );
 
 	ReqCnt = 0;
-	ReqCmd = rq_data_dir(CURRENT);
-	ReqBlock = CURRENT->sector;
-	ReqBuffer = CURRENT->buffer;
+	ReqCmd = rq_data_dir(fd_request);
+	ReqBlock = blk_rq_pos(fd_request);
+	ReqBuffer = fd_request->buffer;
 	setup_req_params( drive );
 	do_fd_action( drive );
 
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4d4d5e0d3fa6..e714e7cce6f2 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1299,7 +1299,6 @@ static void cciss_softirq_done(struct request *rq)
 {
 	CommandList_struct *cmd = rq->completion_data;
 	ctlr_info_t *h = hba[cmd->ctlr];
-	unsigned int nr_bytes;
 	unsigned long flags;
 	u64bit temp64;
 	int i, ddir;
@@ -1321,15 +1320,11 @@ static void cciss_softirq_done(struct request *rq)
 	printk("Done with %p\n", rq);
 #endif /* CCISS_DEBUG */
 
-	/*
-	 * Store the full size and set the residual count for pc requests
-	 */
-	nr_bytes = blk_rq_bytes(rq);
+	/* set the residual count for pc requests */
 	if (blk_pc_request(rq))
-		rq->data_len = cmd->err_info->ResidualCnt;
+		rq->resid_len = cmd->err_info->ResidualCnt;
 
-	if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes))
-		BUG();
+	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
 
 	spin_lock_irqsave(&h->lock, flags);
 	cmd_free(h, cmd, 1);
@@ -2691,7 +2686,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			printk(KERN_WARNING "cciss: cmd %p has"
 			       " completed with data underrun "
 			       "reported\n", cmd);
-			cmd->rq->data_len = cmd->err_info->ResidualCnt;
+			cmd->rq->resid_len = cmd->err_info->ResidualCnt;
 		}
 		break;
 	case CMD_DATA_OVERRUN:
@@ -2806,7 +2801,7 @@ static void do_cciss_request(struct request_queue *q)
 		goto startio;
 
  queue:
-	creq = elv_next_request(q);
+	creq = blk_peek_request(q);
 	if (!creq)
 		goto startio;
 
@@ -2815,7 +2810,7 @@ static void do_cciss_request(struct request_queue *q)
 	if ((c = cmd_alloc(h, 1)) == NULL)
 		goto full;
 
-	blkdev_dequeue_request(creq);
+	blk_start_request(creq);
 
 	spin_unlock_irq(q->queue_lock);
 
@@ -2840,10 +2835,10 @@ static void do_cciss_request(struct request_queue *q)
 	c->Request.Timeout = 0; // Don't time out
 	c->Request.CDB[0] =
 		(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
-	start_blk = creq->sector;
+	start_blk = blk_rq_pos(creq);
 #ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
-	       (int)creq->nr_sectors);
+	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
 #endif /* CCISS_DEBUG */
 
 	sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2869,8 +2864,8 @@ static void do_cciss_request(struct request_queue *q)
 		h->maxSG = seg;
 
 #ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
-	       creq->nr_sectors, seg);
+	printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+	       blk_rq_sectors(creq), seg);
 #endif /* CCISS_DEBUG */
 
 	c->Header.SGList = c->Header.SGTotal = seg;
@@ -2882,8 +2877,8 @@ static void do_cciss_request(struct request_queue *q)
 			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
 			c->Request.CDB[5] = start_blk & 0xff;
 			c->Request.CDB[6] = 0;	// (sect >> 24) & 0xff; MSB
-			c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
-			c->Request.CDB[8] = creq->nr_sectors & 0xff;
+			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
 			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
 		} else {
 			u32 upper32 = upper_32_bits(start_blk);
@@ -2898,10 +2893,10 @@ static void do_cciss_request(struct request_queue *q)
 			c->Request.CDB[7]= (start_blk >> 16) & 0xff;
 			c->Request.CDB[8]= (start_blk >> 8) & 0xff;
 			c->Request.CDB[9]= start_blk & 0xff;
-			c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
-			c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
-			c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
-			c->Request.CDB[13]= creq->nr_sectors & 0xff;
+			c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
+			c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
+			c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
+			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
 			c->Request.CDB[14] = c->Request.CDB[15] = 0;
 		}
 	} else if (blk_pc_request(creq)) {
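
Two renamed fields drive the cciss changes above: the residual byte count of a packet command is now reported in rq->resid_len instead of being written back into rq->data_len, and whole-request completion collapses into blk_end_request_all(), with no byte count to get wrong. A sketch of a completion handler under those assumptions — my_cmd and its members mirror the cciss usage, not a generic API:

static void my_complete(struct request *rq, struct my_cmd *cmd)
{
	/* packet request: report bytes NOT transferred via resid_len */
	if (blk_pc_request(rq))
		rq->resid_len = cmd->residual_bytes;

	/* finish the whole request in one call */
	blk_end_request_all(rq, cmd->error ? -EIO : 0);
}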
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index ca268ca11159..a02dcfc00f13 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
 		goto startio;
 
 queue_next:
-	creq = elv_next_request(q);
+	creq = blk_peek_request(q);
 	if (!creq)
 		goto startio;
 
@@ -912,17 +912,18 @@ queue_next:
 	if ((c = cmd_alloc(h,1)) == NULL)
 		goto startio;
 
-	blkdev_dequeue_request(creq);
+	blk_start_request(creq);
 
 	c->ctlr = h->ctlr;
 	c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
 	c->hdr.size = sizeof(rblk_t) >> 2;
 	c->size += sizeof(rblk_t);
 
-	c->req.hdr.blk = creq->sector;
+	c->req.hdr.blk = blk_rq_pos(creq);
 	c->rq = creq;
 DBGPX(
-	printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+	printk("sector=%d, nr_sectors=%u\n",
+	       blk_rq_pos(creq), blk_rq_sectors(creq));
 );
 	sg_init_table(tmp_sg, SG_MAX);
 	seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
 			  tmp_sg[i].offset,
 			  tmp_sg[i].length, dir);
 	}
-DBGPX(	printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+DBGPX(	printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
 	c->req.hdr.sg_cnt = seg;
-	c->req.hdr.blk_cnt = creq->nr_sectors;
+	c->req.hdr.blk_cnt = blk_rq_sectors(creq);
 	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
 	c->type = CMD_RWREQ;
 
@@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
 				cmd->req.sg[i].size, ddir);
 
 	DBGPX(printk("Done with %p\n", rq););
-	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
-		BUG();
+	__blk_end_request_all(rq, error);
 }
 
 /*
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1300df6f1642..90877fee0ee0 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
 	del_timer(&fd_timeout);
 	cont = NULL;
 	clear_bit(0, &fdc_busy);
-	if (elv_next_request(floppy_queue))
+	if (current_req || blk_peek_request(floppy_queue))
 		do_fd_request(floppy_queue);
 	spin_unlock_irqrestore(&floppy_lock, flags);
 	wake_up(&fdc_wait);
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
 
 	/* current_count_sectors can be zero if transfer failed */
 	if (error)
-		nr_sectors = req->current_nr_sectors;
+		nr_sectors = blk_rq_cur_sectors(req);
 	if (__blk_end_request(req, error, nr_sectors << 9))
 		return;
 
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
 	if (uptodate) {
 		/* maintain values for invalidation on geometry
 		 * change */
-		block = current_count_sectors + req->sector;
+		block = current_count_sectors + blk_rq_pos(req);
 		INFBOUND(DRS->maxblock, block);
 		if (block > _floppy->sect)
 			DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
 			/* record write error information */
 			DRWE->write_errors++;
 			if (DRWE->write_errors == 1) {
-				DRWE->first_error_sector = req->sector;
+				DRWE->first_error_sector = blk_rq_pos(req);
 				DRWE->first_error_generation = DRS->generation;
 			}
-			DRWE->last_error_sector = req->sector;
+			DRWE->last_error_sector = blk_rq_pos(req);
 			DRWE->last_error_generation = DRS->generation;
 		}
 		spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
 	max_sector = transfer_size(ssize,
 				   min(max_sector, max_sector_2),
-				   current_req->nr_sectors);
+				   blk_rq_sectors(current_req));
 
 	if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
-	    buffer_max > fsector_t + current_req->nr_sectors)
+	    buffer_max > fsector_t + blk_rq_sectors(current_req))
 		current_count_sectors = min_t(int, buffer_max - fsector_t,
-					      current_req->nr_sectors);
+					      blk_rq_sectors(current_req));
 
 	remaining = current_count_sectors << 9;
 #ifdef FLOPPY_SANITY_CHECK
-	if ((remaining >> 9) > current_req->nr_sectors &&
-	    CT(COMMAND) == FD_WRITE) {
+	if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
 		DPRINT("in copy buffer\n");
 		printk("current_count_sectors=%ld\n", current_count_sectors);
 		printk("remaining=%d\n", remaining >> 9);
-		printk("current_req->nr_sectors=%ld\n",
-		       current_req->nr_sectors);
+		printk("current_req->nr_sectors=%u\n",
+		       blk_rq_sectors(current_req));
 		printk("current_req->current_nr_sectors=%u\n",
-		       current_req->current_nr_sectors);
+		       blk_rq_cur_sectors(current_req));
 		printk("max_sector=%d\n", max_sector);
 		printk("ssize=%d\n", ssize);
 	}
@@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
 	dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
 
-	size = current_req->current_nr_sectors << 9;
+	size = blk_rq_cur_bytes(current_req);
 
 	rq_for_each_segment(bv, current_req, iter) {
 		if (!remaining)
@@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
 
 	max_sector = _floppy->sect * _floppy->head;
 
-	TRACK = (int)current_req->sector / max_sector;
-	fsector_t = (int)current_req->sector % max_sector;
+	TRACK = (int)blk_rq_pos(current_req) / max_sector;
+	fsector_t = (int)blk_rq_pos(current_req) % max_sector;
 	if (_floppy->track && TRACK >= _floppy->track) {
-		if (current_req->current_nr_sectors & 1) {
+		if (blk_rq_cur_sectors(current_req) & 1) {
 			current_count_sectors = 1;
 			return 1;
 		} else
@@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
 	if (fsector_t >= max_sector) {
 		current_count_sectors =
 		    min_t(int, _floppy->sect - fsector_t,
-			  current_req->nr_sectors);
+			  blk_rq_sectors(current_req));
 		return 1;
 	}
 	SIZECODE = 2;
@@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
 
 	in_sector_offset = (fsector_t % _floppy->sect) % ssize;
 	aligned_sector_t = fsector_t - in_sector_offset;
-	max_size = current_req->nr_sectors;
+	max_size = blk_rq_sectors(current_req);
 	if ((raw_cmd->track == buffer_track) &&
 	    (current_drive == buffer_drive) &&
 	    (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
 			copy_buffer(1, max_sector, buffer_max);
 			return 1;
 		}
-	} else if (in_sector_offset || current_req->nr_sectors < ssize) {
+	} else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
 		if (CT(COMMAND) == FD_WRITE) {
-			if (fsector_t + current_req->nr_sectors > ssize &&
-			    fsector_t + current_req->nr_sectors < ssize + ssize)
+			if (fsector_t + blk_rq_sectors(current_req) > ssize &&
+			    fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
 				max_size = ssize + ssize;
 			else
 				max_size = ssize;
@@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
 		   (indirect * 2 > direct * 3 &&
 		    *errors < DP->max_errors.read_track && ((!probing
 		    || (DP->read_track & (1 << DRS->probed_format)))))) {
-		max_size = current_req->nr_sectors;
+		max_size = blk_rq_sectors(current_req);
 	} else {
 		raw_cmd->kernel_data = current_req->buffer;
 		raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
 	    fsector_t > buffer_max ||
 	    fsector_t < buffer_min ||
 	    ((CT(COMMAND) == FD_READ ||
-	      (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+	      (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
 	     max_sector > 2 * max_buffer_sectors + buffer_min &&
 	     max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
 		/* not enough space */
@@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
 				printk("write\n");
 				return 0;
 			}
-	} else if (raw_cmd->length > current_req->nr_sectors << 9 ||
-		   current_count_sectors > current_req->nr_sectors) {
+	} else if (raw_cmd->length > blk_rq_bytes(current_req) ||
+		   current_count_sectors > blk_rq_sectors(current_req)) {
 		DPRINT("buffer overrun in direct transfer\n");
 		return 0;
 	} else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
 	struct request *req;
 
 	spin_lock_irq(floppy_queue->queue_lock);
-	req = elv_next_request(floppy_queue);
+	req = blk_fetch_request(floppy_queue);
 	spin_unlock_irq(floppy_queue->queue_lock);
 	if (!req) {
 		do_floppy = NULL;
@@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
 	if (usage_count == 0) {
 		printk("warning: usage count=0, current_req=%p exiting\n",
 		       current_req);
-		printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
-		       current_req->cmd_type, current_req->cmd_flags);
+		printk("sect=%ld type=%x flags=%x\n",
+		       (long)blk_rq_pos(current_req), current_req->cmd_type,
+		       current_req->cmd_flags);
 		return;
 	}
 	if (test_bit(0, &fdc_busy)) {
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index baaa9e486e50..961de56d00a9 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -98,10 +98,9 @@
 
 static DEFINE_SPINLOCK(hd_lock);
 static struct request_queue *hd_queue;
+static struct request *hd_req;
 
 #define MAJOR_NR HD_MAJOR
-#define QUEUE (hd_queue)
-#define CURRENT elv_next_request(hd_queue)
 
 #define TIMEOUT_VALUE	(6*HZ)
 #define HD_DELAY	0
@@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
 		NR_HD = hdind+1;
 }
 
+static bool hd_end_request(int err, unsigned int bytes)
+{
+	if (__blk_end_request(hd_req, err, bytes))
+		return true;
+	hd_req = NULL;
+	return false;
+}
+
+static bool hd_end_request_cur(int err)
+{
+	return hd_end_request(err, blk_rq_cur_bytes(hd_req));
+}
+
 static void dump_status(const char *msg, unsigned int stat)
 {
 	char *name = "hd?";
-	if (CURRENT)
-		name = CURRENT->rq_disk->disk_name;
+	if (hd_req)
+		name = hd_req->rq_disk->disk_name;
 
 #ifdef VERBOSE_ERRORS
 	printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
 		if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
 			printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
 				inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
-			if (CURRENT)
-				printk(", sector=%ld", CURRENT->sector);
+			if (hd_req)
+				printk(", sector=%ld", blk_rq_pos(hd_req));
 		}
 		printk("\n");
 	}
@@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
  */
 static void bad_rw_intr(void)
 {
-	struct request *req = CURRENT;
+	struct request *req = hd_req;
+
 	if (req != NULL) {
 		struct hd_i_struct *disk = req->rq_disk->private_data;
 		if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
-			end_request(req, 0);
+			hd_end_request_cur(-EIO);
 			disk->special_op = disk->recalibrate = 1;
 		} else if (req->errors % RESET_FREQ == 0)
 			reset = 1;
@@ -452,37 +465,30 @@ static void read_intr(void)
 		bad_rw_intr();
 		hd_request();
 		return;
+
 ok_to_read:
-	req = CURRENT;
+	req = hd_req;
 	insw(HD_DATA, req->buffer, 256);
-	req->sector++;
-	req->buffer += 512;
-	req->errors = 0;
-	i = --req->nr_sectors;
-	--req->current_nr_sectors;
 #ifdef DEBUG
-	printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
-	       req->rq_disk->disk_name, req->sector, req->nr_sectors,
-	       req->buffer+512);
+	printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+	       req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+	       blk_rq_sectors(req) - 1, req->buffer+512);
 #endif
-	if (req->current_nr_sectors <= 0)
-		end_request(req, 1);
-	if (i > 0) {
+	if (hd_end_request(0, 512)) {
 		SET_HANDLER(&read_intr);
 		return;
 	}
+
 	(void) inb_p(HD_STATUS);
 #if (HD_DELAY > 0)
 	last_req = read_timer();
 #endif
-	if (elv_next_request(QUEUE))
-		hd_request();
-	return;
+	hd_request();
 }
 
 static void write_intr(void)
 {
-	struct request *req = CURRENT;
+	struct request *req = hd_req;
 	int i;
 	int retries = 100000;
 
@@ -492,30 +498,25 @@ static void write_intr(void)
 			continue;
 		if (!OK_STATUS(i))
 			break;
-		if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+		if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
 			goto ok_to_write;
 	} while (--retries > 0);
 	dump_status("write_intr", i);
 	bad_rw_intr();
 	hd_request();
 	return;
+
 ok_to_write:
-	req->sector++;
-	i = --req->nr_sectors;
-	--req->current_nr_sectors;
-	req->buffer += 512;
-	if (!i || (req->bio && req->current_nr_sectors <= 0))
-		end_request(req, 1);
-	if (i > 0) {
+	if (hd_end_request(0, 512)) {
 		SET_HANDLER(&write_intr);
 		outsw(HD_DATA, req->buffer, 256);
-	} else {
+		return;
+	}
+
 #if (HD_DELAY > 0)
-		last_req = read_timer();
+	last_req = read_timer();
 #endif
-		hd_request();
-	}
-	return;
+	hd_request();
 }
 
 static void recal_intr(void)
@@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
 
 	do_hd = NULL;
 
-	if (!CURRENT)
+	if (!hd_req)
 		return;
 
 	spin_lock_irq(hd_queue->queue_lock);
 	reset = 1;
-	name = CURRENT->rq_disk->disk_name;
+	name = hd_req->rq_disk->disk_name;
 	printk("%s: timeout\n", name);
-	if (++CURRENT->errors >= MAX_ERRORS) {
+	if (++hd_req->errors >= MAX_ERRORS) {
 #ifdef DEBUG
 		printk("%s: too many errors\n", name);
 #endif
-		end_request(CURRENT, 0);
+		hd_end_request_cur(-EIO);
 	}
 	hd_request();
 	spin_unlock_irq(hd_queue->queue_lock);
@@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
 	}
 	if (disk->head > 16) {
 		printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
-		end_request(req, 0);
+		hd_end_request_cur(-EIO);
 	}
 	disk->special_op = 0;
 	return 1;
@@ -590,24 +591,27 @@ static void hd_request(void)
repeat:
 	del_timer(&device_timer);
 
-	req = CURRENT;
-	if (!req) {
-		do_hd = NULL;
-		return;
+	if (!hd_req) {
+		hd_req = blk_fetch_request(hd_queue);
+		if (!hd_req) {
+			do_hd = NULL;
+			return;
+		}
 	}
+	req = hd_req;
 
 	if (reset) {
 		reset_hd();
 		return;
 	}
 	disk = req->rq_disk->private_data;
-	block = req->sector;
-	nsect = req->nr_sectors;
+	block = blk_rq_pos(req);
+	nsect = blk_rq_sectors(req);
 	if (block >= get_capacity(req->rq_disk) ||
 	    ((block+nsect) > get_capacity(req->rq_disk))) {
 		printk("%s: bad access: block=%d, count=%d\n",
 		       req->rq_disk->disk_name, block, nsect);
-		end_request(req, 0);
+		hd_end_request_cur(-EIO);
 		goto repeat;
 	}
 
@@ -647,7 +651,7 @@ repeat:
 			break;
 		default:
 			printk("unknown hd-command\n");
-			end_request(req, 0);
+			hd_end_request_cur(-EIO);
 			break;
 		}
 	}
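
Since hd services exactly one request at a time from its interrupt handlers, the patch caches the in-flight request in hd_req and funnels completion through hd_end_request(), which forgets the cached request once __blk_end_request() reports it fully done. The wrapper pattern in isolation, as a sketch (my_req is the assumed per-device cache):

static struct request *my_req;	/* single in-flight request */

static bool my_end_request(int err, unsigned int bytes)
{
	if (__blk_end_request(my_req, err, bytes))
		return true;	/* part of the request still pending */

	my_req = NULL;		/* fully completed: drop the cache */
	return false;
}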
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ddae80825899..801f4ab83302 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,11 +511,7 @@ out:
  */
 static void loop_add_bio(struct loop_device *lo, struct bio *bio)
 {
-	if (lo->lo_biotail) {
-		lo->lo_biotail->bi_next = bio;
-		lo->lo_biotail = bio;
-	} else
-		lo->lo_bio = lo->lo_biotail = bio;
+	bio_list_add(&lo->lo_bio_list, bio);
 }
 
 /*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
  */
 static struct bio *loop_get_bio(struct loop_device *lo)
 {
-	struct bio *bio;
-
-	if ((bio = lo->lo_bio)) {
-		if (bio == lo->lo_biotail)
-			lo->lo_biotail = NULL;
-		lo->lo_bio = bio->bi_next;
-		bio->bi_next = NULL;
-	}
-
-	return bio;
+	return bio_list_pop(&lo->lo_bio_list);
 }
 
 static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
 
 	set_user_nice(current, -20);
 
-	while (!kthread_should_stop() || lo->lo_bio) {
+	while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
 
 		wait_event_interruptible(lo->lo_event,
-				lo->lo_bio || kthread_should_stop());
+				!bio_list_empty(&lo->lo_bio_list) ||
+				kthread_should_stop());
 
-		if (!lo->lo_bio)
+		if (bio_list_empty(&lo->lo_bio_list))
 			continue;
 		spin_lock_irq(&lo->lo_lock);
 		bio = loop_get_bio(lo);
@@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 		goto out_putf;
 
-	/* new backing store needs to support loop (eg splice_read) */
-	if (!inode->i_fop->splice_read)
-		goto out_putf;
-
 	/* size of the new backing store needs to be the same */
 	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
 		goto out_putf;
@@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	error = -EINVAL;
 	if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
 		const struct address_space_operations *aops = mapping->a_ops;
-		/*
-		 * If we can't read - sorry. If we only can't write - well,
-		 * it's going to be read-only.
-		 */
-		if (!file->f_op->splice_read)
-			goto out_putf;
+
 		if (aops->write_begin)
 			lo_flags |= LO_FLAGS_USE_AOPS;
 		if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	lo->old_gfp_mask = mapping_gfp_mask(mapping);
 	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 
-	lo->lo_bio = lo->lo_biotail = NULL;
+	bio_list_init(&lo->lo_bio_list);
 
 	/*
 	 * set queue make_request_fn, and add limits based on lower level
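
loop's hand-rolled singly linked bio queue becomes a struct bio_list, which packages exactly the head/tail pair the old code maintained by hand. bio_list_init()/bio_list_add()/bio_list_pop()/bio_list_empty() are the generic helpers used above; a minimal consumer sketch, with bio_endio() standing in for real processing:

static void my_drain(struct bio_list *bl)
{
	struct bio *bio;

	/* bio_list_pop() detaches the head and clears bi_next */
	while ((bio = bio_list_pop(bl)) != NULL)
		bio_endio(bio, 0);	/* complete with no error */
}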
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f3898353d0a8..c0cd0a03f698 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -17,71 +17,220 @@
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
-#include <linux/libata.h>
+#include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
-#include <linux/mg_disk.h>
 
 #define MG_RES_SEC	(CONFIG_MG_DISK_RES << 1)
 
+/* name for block device */
+#define MG_DISK_NAME	"mgd"
+/* name for platform device */
+#define MG_DEV_NAME	"mg_disk"
+
+#define MG_DISK_MAJ	0
+#define MG_DISK_MAX_PART	16
+#define MG_SECTOR_SIZE	512
+#define MG_MAX_SECTS	256
+
+/* Register offsets */
+#define MG_BUFF_OFFSET	0x8000
+#define MG_STORAGE_BUFFER_SIZE	0x200
+#define MG_REG_OFFSET	0xC000
+#define MG_REG_FEATURE	(MG_REG_OFFSET + 2)	/* write case */
+#define MG_REG_ERROR	(MG_REG_OFFSET + 2)	/* read case */
+#define MG_REG_SECT_CNT	(MG_REG_OFFSET + 4)
+#define MG_REG_SECT_NUM	(MG_REG_OFFSET + 6)
+#define MG_REG_CYL_LOW	(MG_REG_OFFSET + 8)
+#define MG_REG_CYL_HIGH	(MG_REG_OFFSET + 0xA)
+#define MG_REG_DRV_HEAD	(MG_REG_OFFSET + 0xC)
+#define MG_REG_COMMAND	(MG_REG_OFFSET + 0xE)	/* write case */
+#define MG_REG_STATUS	(MG_REG_OFFSET + 0xE)	/* read case */
+#define MG_REG_DRV_CTRL	(MG_REG_OFFSET + 0x10)
+#define MG_REG_BURST_CTRL	(MG_REG_OFFSET + 0x12)
+
+/* handy status */
+#define MG_STAT_READY	(ATA_DRDY | ATA_DSC)
+#define MG_READY_OK(s)	(((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
+				 ATA_ERR))) == MG_STAT_READY)
+
+/* error code for others */
+#define MG_ERR_NONE	0
+#define MG_ERR_TIMEOUT	0x100
+#define MG_ERR_INIT_STAT	0x101
+#define MG_ERR_TRANSLATION	0x102
+#define MG_ERR_CTRL_RST	0x103
+#define MG_ERR_INV_STAT	0x104
+#define MG_ERR_RSTOUT	0x105
+
+#define MG_MAX_ERRORS	6	/* Max read/write errors */
+
+/* command */
+#define MG_CMD_RD	0x20
+#define MG_CMD_WR	0x30
+#define MG_CMD_SLEEP	0x99
+#define MG_CMD_WAKEUP	0xC3
+#define MG_CMD_ID	0xEC
+#define MG_CMD_WR_CONF	0x3C
+#define MG_CMD_RD_CONF	0x40
+
+/* operation mode */
+#define MG_OP_CASCADE	(1 << 0)
+#define MG_OP_CASCADE_SYNC_RD	(1 << 1)
+#define MG_OP_CASCADE_SYNC_WR	(1 << 2)
+#define MG_OP_INTERLEAVE	(1 << 3)
+
+/* synchronous */
+#define MG_BURST_LAT_4	(3 << 4)
+#define MG_BURST_LAT_5	(4 << 4)
+#define MG_BURST_LAT_6	(5 << 4)
+#define MG_BURST_LAT_7	(6 << 4)
+#define MG_BURST_LAT_8	(7 << 4)
+#define MG_BURST_LEN_4	(1 << 1)
+#define MG_BURST_LEN_8	(2 << 1)
+#define MG_BURST_LEN_16	(3 << 1)
+#define MG_BURST_LEN_32	(4 << 1)
+#define MG_BURST_LEN_CONT	(0 << 1)
+
+/* timeout value (unit: ms) */
+#define MG_TMAX_CONF_TO_CMD	1
+#define MG_TMAX_WAIT_RD_DRQ	10
+#define MG_TMAX_WAIT_WR_DRQ	500
+#define MG_TMAX_RST_TO_BUSY	10
+#define MG_TMAX_HDRST_TO_RDY	500
+#define MG_TMAX_SWRST_TO_RDY	500
+#define MG_TMAX_RSTOUT	3000
+
+/* device attribution */
+/* use mflash as boot device */
+#define MG_BOOT_DEV	(1 << 0)
+/* use mflash as storage device */
+#define MG_STORAGE_DEV	(1 << 1)
+/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
+#define MG_STORAGE_DEV_SKIP_RST	(1 << 2)
+
+#define MG_DEV_MASK	(MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
+
+/* names of GPIO resource */
+#define MG_RST_PIN	"mg_rst"
+/* except MG_BOOT_DEV, reset-out pin should be assigned */
+#define MG_RSTOUT_PIN	"mg_rstout"
+
+/* private driver data */
+struct mg_drv_data {
+	/* disk resource */
+	u32 use_polling;
+
+	/* device attribution */
+	u32 dev_attr;
+
+	/* internally used */
+	struct mg_host *host;
+};
+
+/* main structure for mflash driver */
+struct mg_host {
+	struct device *dev;
+
+	struct request_queue *breq;
+	struct request *req;
+	spinlock_t lock;
+	struct gendisk *gd;
+
+	struct timer_list timer;
+	void (*mg_do_intr) (struct mg_host *);
+
+	u16 id[ATA_ID_WORDS];
+
+	u16 cyls;
+	u16 heads;
+	u16 sectors;
+	u32 n_sectors;
+	u32 nres_sectors;
+
+	void __iomem *dev_base;
+	unsigned int irq;
+	unsigned int rst;
+	unsigned int rstout;
+
+	u32 major;
+	u32 error;
+};
+
+/*
+ * Debugging macro and defines
+ */
+#undef DO_MG_DEBUG
+#ifdef DO_MG_DEBUG
+#  define MG_DBG(fmt, args...) \
+	printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_MG_DEBUG */
+#  define MG_DBG(fmt, args...) do { } while (0)
+#endif /* CONFIG_MG_DEBUG */
+
 static void mg_request(struct request_queue *);
 
+static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
+{
+	if (__blk_end_request(host->req, err, nr_bytes))
+		return true;
+
+	host->req = NULL;
+	return false;
+}
+
+static bool mg_end_request_cur(struct mg_host *host, int err)
+{
+	return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
+}
+
 static void mg_dump_status(const char *msg, unsigned int stat,
 		struct mg_host *host)
 {
 	char *name = MG_DISK_NAME;
-	struct request *req;
 
-	if (host->breq) {
-		req = elv_next_request(host->breq);
-		if (req)
-			name = req->rq_disk->disk_name;
-	}
+	if (host->req)
+		name = host->req->rq_disk->disk_name;
 
 	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
-	if (stat & MG_REG_STATUS_BIT_BUSY)
+	if (stat & ATA_BUSY)
 		printk("Busy ");
-	if (stat & MG_REG_STATUS_BIT_READY)
+	if (stat & ATA_DRDY)
 		printk("DriveReady ");
-	if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
+	if (stat & ATA_DF)
 		printk("WriteFault ");
-	if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
+	if (stat & ATA_DSC)
 		printk("SeekComplete ");
-	if (stat & MG_REG_STATUS_BIT_DATA_REQ)
+	if (stat & ATA_DRQ)
 		printk("DataRequest ");
-	if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
+	if (stat & ATA_CORR)
 		printk("CorrectedError ");
-	if (stat & MG_REG_STATUS_BIT_ERROR)
+	if (stat & ATA_ERR)
 		printk("Error ");
 	printk("}\n");
-	if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
+	if ((stat & ATA_ERR) == 0) {
 		host->error = 0;
 	} else {
 		host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
 		printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
 				host->error & 0xff);
-		if (host->error & MG_REG_ERR_BBK)
+		if (host->error & ATA_BBK)
 			printk("BadSector ");
-		if (host->error & MG_REG_ERR_UNC)
+		if (host->error & ATA_UNC)
 			printk("UncorrectableError ");
-		if (host->error & MG_REG_ERR_IDNF)
+		if (host->error & ATA_IDNF)
 			printk("SectorIdNotFound ");
-		if (host->error & MG_REG_ERR_ABRT)
+		if (host->error & ATA_ABORTED)
 			printk("DriveStatusError ");
-		if (host->error & MG_REG_ERR_AMNF)
+		if (host->error & ATA_AMNF)
 			printk("AddrMarkNotFound ");
 		printk("}");
-		if (host->error &
-				(MG_REG_ERR_BBK | MG_REG_ERR_UNC |
-				 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
-			if (host->breq) {
-				req = elv_next_request(host->breq);
-				if (req)
-					printk(", sector=%u", (u32)req->sector);
-			}
-
+		if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
+			if (host->req)
+				printk(", sector=%u",
+				       (unsigned int)blk_rq_pos(host->req));
 		}
 		printk("\n");
 	}
@@ -100,12 +249,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
100 249
101 do { 250 do {
102 cur_jiffies = jiffies; 251 cur_jiffies = jiffies;
103 if (status & MG_REG_STATUS_BIT_BUSY) { 252 if (status & ATA_BUSY) {
104 if (expect == MG_REG_STATUS_BIT_BUSY) 253 if (expect == ATA_BUSY)
105 break; 254 break;
106 } else { 255 } else {
107 /* Check the error condition! */ 256 /* Check the error condition! */
108 if (status & MG_REG_STATUS_BIT_ERROR) { 257 if (status & ATA_ERR) {
109 mg_dump_status("mg_wait", status, host); 258 mg_dump_status("mg_wait", status, host);
110 break; 259 break;
111 } 260 }
@@ -114,8 +263,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
114 if (MG_READY_OK(status)) 263 if (MG_READY_OK(status))
115 break; 264 break;
116 265
117 if (expect == MG_REG_STATUS_BIT_DATA_REQ) 266 if (expect == ATA_DRQ)
118 if (status & MG_REG_STATUS_BIT_DATA_REQ) 267 if (status & ATA_DRQ)
119 break; 268 break;
120 } 269 }
121 if (!msec) { 270 if (!msec) {
@@ -173,6 +322,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
173 return IRQ_HANDLED; 322 return IRQ_HANDLED;
174} 323}
175 324
325/* local copy of ata_id_string() */
326static void mg_id_string(const u16 *id, unsigned char *s,
327 unsigned int ofs, unsigned int len)
328{
329 unsigned int c;
330
331 BUG_ON(len & 1);
332
333 while (len > 0) {
334 c = id[ofs] >> 8;
335 *s = c;
336 s++;
337
338 c = id[ofs] & 0xff;
339 *s = c;
340 s++;
341
342 ofs++;
343 len -= 2;
344 }
345}
346
347/* local copy of ata_id_c_string() */
348static void mg_id_c_string(const u16 *id, unsigned char *s,
349 unsigned int ofs, unsigned int len)
350{
351 unsigned char *p;
352
353 mg_id_string(id, s, ofs, len - 1);
354
355 p = s + strnlen(s, len - 1);
356 while (p > s && p[-1] == ' ')
357 p--;
358 *p = '\0';
359}
360
176static int mg_get_disk_id(struct mg_host *host) 361static int mg_get_disk_id(struct mg_host *host)
177{ 362{
178 u32 i; 363 u32 i;
@@ -184,12 +369,10 @@ static int mg_get_disk_id(struct mg_host *host)
184 char serial[ATA_ID_SERNO_LEN + 1]; 369 char serial[ATA_ID_SERNO_LEN + 1];
185 370
186 if (!prv_data->use_polling) 371 if (!prv_data->use_polling)
187 outb(MG_REG_CTRL_INTR_DISABLE, 372 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
188 (unsigned long)host->dev_base +
189 MG_REG_DRV_CTRL);
190 373
191 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); 374 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
192 err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ); 375 err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
193 if (err) 376 if (err)
194 return err; 377 return err;
195 378
@@ -219,9 +402,9 @@ static int mg_get_disk_id(struct mg_host *host)
219 host->n_sectors -= host->nres_sectors; 402 host->n_sectors -= host->nres_sectors;
220 } 403 }
221 404
222 ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); 405 mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
223 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); 406 mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
224 ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); 407 mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
225 printk(KERN_INFO "mg_disk: model: %s\n", model); 408 printk(KERN_INFO "mg_disk: model: %s\n", model);
226 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev); 409 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
227 printk(KERN_INFO "mg_disk: serial: %s\n", serial); 410 printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@@ -229,8 +412,7 @@ static int mg_get_disk_id(struct mg_host *host)
229 host->n_sectors, host->nres_sectors); 412 host->n_sectors, host->nres_sectors);
230 413
231 if (!prv_data->use_polling) 414 if (!prv_data->use_polling)
232 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 415 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
233 MG_REG_DRV_CTRL);
234 416
235 return err; 417 return err;
236} 418}
@@ -244,7 +426,7 @@ static int mg_disk_init(struct mg_host *host)
244 426
245 /* hdd rst low */ 427 /* hdd rst low */
246 gpio_set_value(host->rst, 0); 428 gpio_set_value(host->rst, 0);
247 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 429 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
248 if (err) 430 if (err)
249 return err; 431 return err;
250 432
@@ -255,17 +437,14 @@ static int mg_disk_init(struct mg_host *host)
255 return err; 437 return err;
256 438
257 /* soft reset on */ 439 /* soft reset on */
258 outb(MG_REG_CTRL_RESET | 440 outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
259 (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
260 MG_REG_CTRL_INTR_ENABLE),
261 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 441 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
262 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 442 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
263 if (err) 443 if (err)
264 return err; 444 return err;
265 445
266 /* soft reset off */ 446 /* soft reset off */
267 outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE : 447 outb(prv_data->use_polling ? ATA_NIEN : 0,
268 MG_REG_CTRL_INTR_ENABLE,
269 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 448 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
270 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); 449 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
271 if (err) 450 if (err)
@@ -281,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
281 460
282static void mg_bad_rw_intr(struct mg_host *host) 461static void mg_bad_rw_intr(struct mg_host *host)
283{ 462{
284 struct request *req = elv_next_request(host->breq); 463 if (host->req)
285 if (req != NULL) 464 if (++host->req->errors >= MG_MAX_ERRORS ||
286 if (++req->errors >= MG_MAX_ERRORS || 465 host->error == MG_ERR_TIMEOUT)
287 host->error == MG_ERR_TIMEOUT) 466 mg_end_request_cur(host, -EIO);
288 end_request(req, 0);
289} 467}
290 468
291static unsigned int mg_out(struct mg_host *host, 469static unsigned int mg_out(struct mg_host *host,
@@ -311,7 +489,7 @@ static unsigned int mg_out(struct mg_host *host,
311 MG_REG_CYL_LOW); 489 MG_REG_CYL_LOW);
312 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + 490 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
313 MG_REG_CYL_HIGH); 491 MG_REG_CYL_HIGH);
314 outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE), 492 outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
315 (unsigned long)host->dev_base + MG_REG_DRV_HEAD); 493 (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
316 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND); 494 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
317 return MG_ERR_NONE; 495 return MG_ERR_NONE;
@@ -319,105 +497,77 @@ static unsigned int mg_out(struct mg_host *host,
319 497
320static void mg_read(struct request *req) 498static void mg_read(struct request *req)
321{ 499{
322 u32 remains, j; 500 u32 j;
323 struct mg_host *host = req->rq_disk->private_data; 501 struct mg_host *host = req->rq_disk->private_data;
324 502
325 remains = req->nr_sectors; 503 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
326 504 MG_CMD_RD, NULL) != MG_ERR_NONE)
327 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
328 MG_ERR_NONE)
329 mg_bad_rw_intr(host); 505 mg_bad_rw_intr(host);
330 506
331 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 507 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
332 remains, req->sector, req->buffer); 508 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
509
510 do {
511 u16 *buff = (u16 *)req->buffer;
333 512
334 while (remains) { 513 if (mg_wait(host, ATA_DRQ,
335 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 514 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
336 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
337 mg_bad_rw_intr(host); 515 mg_bad_rw_intr(host);
338 return; 516 return;
339 } 517 }
340 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 518 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
341 *(u16 *)req->buffer = 519 *buff++ = inw((unsigned long)host->dev_base +
342 inw((unsigned long)host->dev_base + 520 MG_BUFF_OFFSET + (j << 1));
343 MG_BUFF_OFFSET + (j << 1));
344 req->buffer += 2;
345 }
346
347 req->sector++;
348 req->errors = 0;
349 remains = --req->nr_sectors;
350 --req->current_nr_sectors;
351
352 if (req->current_nr_sectors <= 0) {
353 MG_DBG("remain : %d sects\n", remains);
354 end_request(req, 1);
355 if (remains > 0)
356 req = elv_next_request(host->breq);
357 }
358 521
359 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + 522 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
360 MG_REG_COMMAND); 523 MG_REG_COMMAND);
361 } 524 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
362} 525}
363 526
364static void mg_write(struct request *req) 527static void mg_write(struct request *req)
365{ 528{
366 u32 remains, j; 529 u32 j;
367 struct mg_host *host = req->rq_disk->private_data; 530 struct mg_host *host = req->rq_disk->private_data;
368 531
369 remains = req->nr_sectors; 532 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
370 533 MG_CMD_WR, NULL) != MG_ERR_NONE) {
371 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
372 MG_ERR_NONE) {
373 mg_bad_rw_intr(host); 534 mg_bad_rw_intr(host);
374 return; 535 return;
375 } 536 }
376 537
377
378 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 538 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
379 remains, req->sector, req->buffer); 539 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
380 while (remains) { 540
381 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 541 do {
382 MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { 542 u16 *buff = (u16 *)req->buffer;
543
544 if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
383 mg_bad_rw_intr(host); 545 mg_bad_rw_intr(host);
384 return; 546 return;
385 } 547 }
386 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 548 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
387 outw(*(u16 *)req->buffer, 549 outw(*buff++, (unsigned long)host->dev_base +
388 (unsigned long)host->dev_base + 550 MG_BUFF_OFFSET + (j << 1));
389 MG_BUFF_OFFSET + (j << 1));
390 req->buffer += 2;
391 }
392 req->sector++;
393 remains = --req->nr_sectors;
394 --req->current_nr_sectors;
395
396 if (req->current_nr_sectors <= 0) {
397 MG_DBG("remain : %d sects\n", remains);
398 end_request(req, 1);
399 if (remains > 0)
400 req = elv_next_request(host->breq);
401 }
402 551
403 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 552 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
404 MG_REG_COMMAND); 553 MG_REG_COMMAND);
405 } 554 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
406} 555}
407 556
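Annotation: the rewritten mg_read()/mg_write() loops above no longer advance req->sector, req->nr_sectors and req->buffer by hand; mg_end_request() does the accounting, and req->buffer is re-read on every iteration because completing a sector may advance the request to a new segment. The shape of the converted PIO read loop, with wait_for_drq() and read_data_word() standing in for the device-specific register accesses (hypothetical helpers, not APIs from the patch):

#include <linux/blkdev.h>

#define SECTOR_BYTES    512

static void pio_read_request(struct my_host *host)
{
        do {
                /* Re-read the buffer pointer: the previous completion may
                 * have moved the request on to its next segment. */
                u16 *buff = (u16 *)host->req->buffer;
                int i;

                if (wait_for_drq(host))         /* hypothetical status poll */
                        return;
                for (i = 0; i < SECTOR_BYTES / 2; i++)
                        buff[i] = read_data_word(host, i);
        } while (my_end_request(host, 0, SECTOR_BYTES));
}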
408static void mg_read_intr(struct mg_host *host) 557static void mg_read_intr(struct mg_host *host)
409{ 558{
559 struct request *req = host->req;
410 u32 i; 560 u32 i;
411 struct request *req; 561 u16 *buff;
412 562
413 /* check status */ 563 /* check status */
414 do { 564 do {
415 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 565 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
416 if (i & MG_REG_STATUS_BIT_BUSY) 566 if (i & ATA_BUSY)
417 break; 567 break;
418 if (!MG_READY_OK(i)) 568 if (!MG_READY_OK(i))
419 break; 569 break;
420 if (i & MG_REG_STATUS_BIT_DATA_REQ) 570 if (i & ATA_DRQ)
421 goto ok_to_read; 571 goto ok_to_read;
422 } while (0); 572 } while (0);
423 mg_dump_status("mg_read_intr", i, host); 573 mg_dump_status("mg_read_intr", i, host);
@@ -427,60 +577,42 @@ static void mg_read_intr(struct mg_host *host)
427 577
428ok_to_read: 578ok_to_read:
429 /* get current segment of request */ 579 /* get current segment of request */
430 req = elv_next_request(host->breq); 580 buff = (u16 *)req->buffer;
431 581
432 /* read 1 sector */ 582 /* read 1 sector */
433 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) { 583 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
434 *(u16 *)req->buffer = 584 *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
435 inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + 585 (i << 1));
436 (i << 1));
437 req->buffer += 2;
438 }
439 586
440 /* manipulate request */
441 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 587 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
442 req->sector, req->nr_sectors - 1, req->buffer); 588 blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
443
444 req->sector++;
445 req->errors = 0;
446 i = --req->nr_sectors;
447 --req->current_nr_sectors;
448
449 /* let know if current segment done */
450 if (req->current_nr_sectors <= 0)
451 end_request(req, 1);
452
453 /* set handler if read remains */
454 if (i > 0) {
455 host->mg_do_intr = mg_read_intr;
456 mod_timer(&host->timer, jiffies + 3 * HZ);
457 }
458 589
459 /* send read confirm */ 590 /* send read confirm */
460 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 591 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
461 592
462 /* goto next request */ 593 if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
463 if (!i) 594 /* set handler if read remains */
595 host->mg_do_intr = mg_read_intr;
596 mod_timer(&host->timer, jiffies + 3 * HZ);
597 } else /* goto next request */
464 mg_request(host->breq); 598 mg_request(host->breq);
465} 599}
466 600
467static void mg_write_intr(struct mg_host *host) 601static void mg_write_intr(struct mg_host *host)
468{ 602{
603 struct request *req = host->req;
469 u32 i, j; 604 u32 i, j;
470 u16 *buff; 605 u16 *buff;
471 struct request *req; 606 bool rem;
472
473 /* get current segment of request */
474 req = elv_next_request(host->breq);
475 607
476 /* check status */ 608 /* check status */
477 do { 609 do {
478 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 610 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
479 if (i & MG_REG_STATUS_BIT_BUSY) 611 if (i & ATA_BUSY)
480 break; 612 break;
481 if (!MG_READY_OK(i)) 613 if (!MG_READY_OK(i))
482 break; 614 break;
483 if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ)) 615 if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
484 goto ok_to_write; 616 goto ok_to_write;
485 } while (0); 617 } while (0);
486 mg_dump_status("mg_write_intr", i, host); 618 mg_dump_status("mg_write_intr", i, host);
@@ -489,18 +621,8 @@ static void mg_write_intr(struct mg_host *host)
489 return; 621 return;
490 622
491ok_to_write: 623ok_to_write:
492 /* manipulate request */ 624 if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
493 req->sector++; 625 /* write 1 sector and set handler if remains */
494 i = --req->nr_sectors;
495 --req->current_nr_sectors;
496 req->buffer += MG_SECTOR_SIZE;
497
498 /* let know if current segment or all done */
499 if (!i || (req->bio && req->current_nr_sectors <= 0))
500 end_request(req, 1);
501
502 /* write 1 sector and set handler if remains */
503 if (i > 0) {
504 buff = (u16 *)req->buffer; 626 buff = (u16 *)req->buffer;
505 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) { 627 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
506 outw(*buff, (unsigned long)host->dev_base + 628 outw(*buff, (unsigned long)host->dev_base +
@@ -508,7 +630,7 @@ ok_to_write:
508 buff++; 630 buff++;
509 } 631 }
510 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 632 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
511 req->sector, req->nr_sectors, req->buffer); 633 blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
512 host->mg_do_intr = mg_write_intr; 634 host->mg_do_intr = mg_write_intr;
513 mod_timer(&host->timer, jiffies + 3 * HZ); 635 mod_timer(&host->timer, jiffies + 3 * HZ);
514 } 636 }
@@ -516,7 +638,7 @@ ok_to_write:
516 /* send write confirm */ 638 /* send write confirm */
517 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 639 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
518 640
519 if (!i) 641 if (!rem)
520 mg_request(host->breq); 642 mg_request(host->breq);
521} 643}
522 644
@@ -524,49 +646,45 @@ void mg_times_out(unsigned long data)
524{ 646{
525 struct mg_host *host = (struct mg_host *)data; 647 struct mg_host *host = (struct mg_host *)data;
526 char *name; 648 char *name;
527 struct request *req;
528 649
529 spin_lock_irq(&host->lock); 650 spin_lock_irq(&host->lock);
530 651
531 req = elv_next_request(host->breq); 652 if (!host->req)
532 if (!req)
533 goto out_unlock; 653 goto out_unlock;
534 654
535 host->mg_do_intr = NULL; 655 host->mg_do_intr = NULL;
536 656
537 name = req->rq_disk->disk_name; 657 name = host->req->rq_disk->disk_name;
538 printk(KERN_DEBUG "%s: timeout\n", name); 658 printk(KERN_DEBUG "%s: timeout\n", name);
539 659
540 host->error = MG_ERR_TIMEOUT; 660 host->error = MG_ERR_TIMEOUT;
541 mg_bad_rw_intr(host); 661 mg_bad_rw_intr(host);
542 662
543 mg_request(host->breq);
544out_unlock: 663out_unlock:
664 mg_request(host->breq);
545 spin_unlock_irq(&host->lock); 665 spin_unlock_irq(&host->lock);
546} 666}
547 667
548static void mg_request_poll(struct request_queue *q) 668static void mg_request_poll(struct request_queue *q)
549{ 669{
550 struct request *req; 670 struct mg_host *host = q->queuedata;
551 struct mg_host *host;
552 671
553 while ((req = elv_next_request(q)) != NULL) { 672 while (1) {
554 host = req->rq_disk->private_data; 673 if (!host->req) {
555 if (blk_fs_request(req)) { 674 host->req = blk_fetch_request(q);
556 switch (rq_data_dir(req)) { 675 if (!host->req)
557 case READ:
558 mg_read(req);
559 break;
560 case WRITE:
561 mg_write(req);
562 break;
563 default:
564 printk(KERN_WARNING "%s:%d unknown command\n",
565 __func__, __LINE__);
566 end_request(req, 0);
567 break; 676 break;
568 }
569 } 677 }
678
679 if (unlikely(!blk_fs_request(host->req))) {
680 mg_end_request_cur(host, -EIO);
681 continue;
682 }
683
684 if (rq_data_dir(host->req) == READ)
685 mg_read(host->req);
686 else
687 mg_write(host->req);
570 } 688 }
571} 689}
572 690
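Annotation: mg_request_poll() above shows the new fetch discipline: a request is dequeued with blk_fetch_request() exactly once, cached in host->req, and then served to completion across any number of re-entries into the request function. The same skeleton, reduced to its moving parts (my_* names are illustrative):

static void my_request_fn(struct request_queue *q)
{
        struct my_host *host = q->queuedata;

        while (1) {
                if (!host->req) {
                        host->req = blk_fetch_request(q);  /* dequeues */
                        if (!host->req)
                                break;          /* queue drained */
                }

                if (unlikely(!blk_fs_request(host->req))) {
                        /* fail non-fs requests one chunk at a time */
                        my_end_request(host, -EIO,
                                       blk_rq_cur_bytes(host->req));
                        continue;
                }

                if (rq_data_dir(host->req) == READ)
                        pio_read_request(host);
                else
                        pio_write_request(host);        /* hypothetical */
        }
}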
@@ -588,18 +706,15 @@ static unsigned int mg_issue_req(struct request *req,
588 break; 706 break;
589 case WRITE: 707 case WRITE:
590 /* TODO : handler */ 708 /* TODO : handler */
591 outb(MG_REG_CTRL_INTR_DISABLE, 709 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
592 (unsigned long)host->dev_base +
593 MG_REG_DRV_CTRL);
594 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) 710 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
595 != MG_ERR_NONE) { 711 != MG_ERR_NONE) {
596 mg_bad_rw_intr(host); 712 mg_bad_rw_intr(host);
597 return host->error; 713 return host->error;
598 } 714 }
599 del_timer(&host->timer); 715 del_timer(&host->timer);
600 mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ); 716 mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
601 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 717 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
602 MG_REG_DRV_CTRL);
603 if (host->error) { 718 if (host->error) {
604 mg_bad_rw_intr(host); 719 mg_bad_rw_intr(host);
605 return host->error; 720 return host->error;
@@ -614,11 +729,6 @@ static unsigned int mg_issue_req(struct request *req,
614 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 729 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
615 MG_REG_COMMAND); 730 MG_REG_COMMAND);
616 break; 731 break;
617 default:
618 printk(KERN_WARNING "%s:%d unknown command\n",
619 __func__, __LINE__);
620 end_request(req, 0);
621 break;
622 } 732 }
623 return MG_ERR_NONE; 733 return MG_ERR_NONE;
624} 734}
@@ -626,16 +736,17 @@ static unsigned int mg_issue_req(struct request *req,
626/* This function also called from IRQ context */ 736/* This function also called from IRQ context */
627static void mg_request(struct request_queue *q) 737static void mg_request(struct request_queue *q)
628{ 738{
739 struct mg_host *host = q->queuedata;
629 struct request *req; 740 struct request *req;
630 struct mg_host *host;
631 u32 sect_num, sect_cnt; 741 u32 sect_num, sect_cnt;
632 742
633 while (1) { 743 while (1) {
634 req = elv_next_request(q); 744 if (!host->req) {
635 if (!req) 745 host->req = blk_fetch_request(q);
636 return; 746 if (!host->req)
637 747 break;
638 host = req->rq_disk->private_data; 748 }
749 req = host->req;
639 750
640 /* check unwanted request call */ 751 /* check unwanted request call */
641 if (host->mg_do_intr) 752 if (host->mg_do_intr)
@@ -643,9 +754,9 @@ static void mg_request(struct request_queue *q)
643 754
644 del_timer(&host->timer); 755 del_timer(&host->timer);
645 756
646 sect_num = req->sector; 757 sect_num = blk_rq_pos(req);
647 /* deal whole segments */ 758 /* deal whole segments */
648 sect_cnt = req->nr_sectors; 759 sect_cnt = blk_rq_sectors(req);
649 760
650 /* sanity check */ 761 /* sanity check */
651 if (sect_num >= get_capacity(req->rq_disk) || 762 if (sect_num >= get_capacity(req->rq_disk) ||
@@ -655,12 +766,14 @@ static void mg_request(struct request_queue *q)
655 "%s: bad access: sector=%d, count=%d\n", 766 "%s: bad access: sector=%d, count=%d\n",
656 req->rq_disk->disk_name, 767 req->rq_disk->disk_name,
657 sect_num, sect_cnt); 768 sect_num, sect_cnt);
658 end_request(req, 0); 769 mg_end_request_cur(host, -EIO);
659 continue; 770 continue;
660 } 771 }
661 772
662 if (!blk_fs_request(req)) 773 if (unlikely(!blk_fs_request(req))) {
663 return; 774 mg_end_request_cur(host, -EIO);
775 continue;
776 }
664 777
665 if (!mg_issue_req(req, host, sect_num, sect_cnt)) 778 if (!mg_issue_req(req, host, sect_num, sect_cnt))
666 return; 779 return;
@@ -690,9 +803,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
690 return -EIO; 803 return -EIO;
691 804
692 if (!prv_data->use_polling) 805 if (!prv_data->use_polling)
693 outb(MG_REG_CTRL_INTR_DISABLE, 806 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
694 (unsigned long)host->dev_base +
695 MG_REG_DRV_CTRL);
696 807
697 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); 808 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
698 /* wait until mflash deep sleep */ 809 /* wait until mflash deep sleep */
@@ -700,9 +811,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
700 811
701 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { 812 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
702 if (!prv_data->use_polling) 813 if (!prv_data->use_polling)
703 outb(MG_REG_CTRL_INTR_ENABLE, 814 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
704 (unsigned long)host->dev_base +
705 MG_REG_DRV_CTRL);
706 return -EIO; 815 return -EIO;
707 } 816 }
708 817
@@ -725,8 +834,7 @@ static int mg_resume(struct platform_device *plat_dev)
725 return -EIO; 834 return -EIO;
726 835
727 if (!prv_data->use_polling) 836 if (!prv_data->use_polling)
728 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 837 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
729 MG_REG_DRV_CTRL);
730 838
731 return 0; 839 return 0;
732} 840}
@@ -877,6 +985,7 @@ static int mg_probe(struct platform_device *plat_dev)
877 __func__, __LINE__); 985 __func__, __LINE__);
878 goto probe_err_5; 986 goto probe_err_5;
879 } 987 }
988 host->breq->queuedata = host;
880 989
881 /* mflash is random device, thanx for the noop */ 990 /* mflash is random device, thanx for the noop */
882 elevator_exit(host->breq->elevator); 991 elevator_exit(host->breq->elevator);
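Annotation: the one-line probe change above (host->breq->queuedata = host) is what lets the request functions start from q->queuedata instead of digging the host out of req->rq_disk->private_data, which is no longer possible before any request has been fetched. The wiring at init time looks roughly like this (error handling elided):

spin_lock_init(&host->lock);
host->breq = blk_init_queue(my_request_fn, &host->lock);
if (!host->breq)
        return -ENOMEM;
host->breq->queuedata = host;   /* request_fn can find host with no request */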
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d6de4f15ccb..5d23ffad7c77 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
110 req, error ? "failed" : "done"); 110 req, error ? "failed" : "done");
111 111
112 spin_lock_irqsave(q->queue_lock, flags); 112 spin_lock_irqsave(q->queue_lock, flags);
113 __blk_end_request(req, error, req->nr_sectors << 9); 113 __blk_end_request_all(req, error);
114 spin_unlock_irqrestore(q->queue_lock, flags); 114 spin_unlock_irqrestore(q->queue_lock, flags);
115} 115}
116 116
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
231{ 231{
232 int result, flags; 232 int result, flags;
233 struct nbd_request request; 233 struct nbd_request request;
234 unsigned long size = req->nr_sectors << 9; 234 unsigned long size = blk_rq_bytes(req);
235 235
236 request.magic = htonl(NBD_REQUEST_MAGIC); 236 request.magic = htonl(NBD_REQUEST_MAGIC);
237 request.type = htonl(nbd_cmd(req)); 237 request.type = htonl(nbd_cmd(req));
238 request.from = cpu_to_be64((u64) req->sector << 9); 238 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
239 request.len = htonl(size); 239 request.len = htonl(size);
240 memcpy(request.handle, &req, sizeof(req)); 240 memcpy(request.handle, &req, sizeof(req));
241 241
242 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", 242 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
243 lo->disk->disk_name, req, 243 lo->disk->disk_name, req,
244 nbdcmd_to_ascii(nbd_cmd(req)), 244 nbdcmd_to_ascii(nbd_cmd(req)),
245 (unsigned long long)req->sector << 9, 245 (unsigned long long)blk_rq_pos(req) << 9,
246 req->nr_sectors << 9); 246 blk_rq_bytes(req));
247 result = sock_xmit(lo, 1, &request, sizeof(request), 247 result = sock_xmit(lo, 1, &request, sizeof(request),
248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); 248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
249 if (result <= 0) { 249 if (result <= 0) {
@@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
533{ 533{
534 struct request *req; 534 struct request *req;
535 535
536 while ((req = elv_next_request(q)) != NULL) { 536 while ((req = blk_fetch_request(q)) != NULL) {
537 struct nbd_device *lo; 537 struct nbd_device *lo;
538 538
539 blkdev_dequeue_request(req);
540
541 spin_unlock_irq(q->queue_lock); 539 spin_unlock_irq(q->queue_lock);
542 540
543 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", 541 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
580 blk_rq_init(NULL, &sreq); 578 blk_rq_init(NULL, &sreq);
581 sreq.cmd_type = REQ_TYPE_SPECIAL; 579 sreq.cmd_type = REQ_TYPE_SPECIAL;
582 nbd_cmd(&sreq) = NBD_CMD_DISC; 580 nbd_cmd(&sreq) = NBD_CMD_DISC;
583 /*
584 * Set these to sane values in case server implementation
585 * fails to check the request type first and also to keep
586 * debugging output cleaner.
587 */
588 sreq.sector = 0;
589 sreq.nr_sectors = 0;
590 if (!lo->sock) 581 if (!lo->sock)
591 return -EINVAL; 582 return -EINVAL;
592 nbd_send_req(lo, &sreq); 583 nbd_send_req(lo, &sreq);
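Annotation: in nbd the conversion is purely mechanical: the wire header is framed from blk_rq_pos()/blk_rq_bytes(), and the hand-initialised sector fields on the disconnect pseudo-request are dropped along with the old field names. A sketch of the header framing, with struct wire_req standing in for nbd's on-the-wire request header (hypothetical name):

#include <linux/blkdev.h>
#include <asm/byteorder.h>

struct wire_req {
        __be64 from;    /* byte offset on the server */
        __be32 len;     /* transfer length in bytes */
};

static void fill_wire_req(struct wire_req *w, struct request *req)
{
        w->from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
        w->len  = cpu_to_be32(blk_rq_bytes(req));
}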
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e91d4b4b014f..911dfd98d813 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
719 if (pcd_busy) 719 if (pcd_busy)
720 return; 720 return;
721 while (1) { 721 while (1) {
722 pcd_req = elv_next_request(q); 722 if (!pcd_req) {
723 if (!pcd_req) 723 pcd_req = blk_fetch_request(q);
724 return; 724 if (!pcd_req)
725 return;
726 }
725 727
726 if (rq_data_dir(pcd_req) == READ) { 728 if (rq_data_dir(pcd_req) == READ) {
727 struct pcd_unit *cd = pcd_req->rq_disk->private_data; 729 struct pcd_unit *cd = pcd_req->rq_disk->private_data;
728 if (cd != pcd_current) 730 if (cd != pcd_current)
729 pcd_bufblk = -1; 731 pcd_bufblk = -1;
730 pcd_current = cd; 732 pcd_current = cd;
731 pcd_sector = pcd_req->sector; 733 pcd_sector = blk_rq_pos(pcd_req);
732 pcd_count = pcd_req->current_nr_sectors; 734 pcd_count = blk_rq_cur_sectors(pcd_req);
733 pcd_buf = pcd_req->buffer; 735 pcd_buf = pcd_req->buffer;
734 pcd_busy = 1; 736 pcd_busy = 1;
735 ps_set_intr(do_pcd_read, NULL, 0, nice); 737 ps_set_intr(do_pcd_read, NULL, 0, nice);
736 return; 738 return;
737 } else 739 } else {
738 end_request(pcd_req, 0); 740 __blk_end_request_all(pcd_req, -EIO);
741 pcd_req = NULL;
742 }
739 } 743 }
740} 744}
741 745
742static inline void next_request(int success) 746static inline void next_request(int err)
743{ 747{
744 unsigned long saved_flags; 748 unsigned long saved_flags;
745 749
746 spin_lock_irqsave(&pcd_lock, saved_flags); 750 spin_lock_irqsave(&pcd_lock, saved_flags);
747 end_request(pcd_req, success); 751 if (!__blk_end_request_cur(pcd_req, err))
752 pcd_req = NULL;
748 pcd_busy = 0; 753 pcd_busy = 0;
749 do_pcd_request(pcd_queue); 754 do_pcd_request(pcd_queue);
750 spin_unlock_irqrestore(&pcd_lock, saved_flags); 755 spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -781,7 +786,7 @@ static void pcd_start(void)
781 786
782 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { 787 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
783 pcd_bufblk = -1; 788 pcd_bufblk = -1;
784 next_request(0); 789 next_request(-EIO);
785 return; 790 return;
786 } 791 }
787 792
@@ -796,7 +801,7 @@ static void do_pcd_read(void)
796 pcd_retries = 0; 801 pcd_retries = 0;
797 pcd_transfer(); 802 pcd_transfer();
798 if (!pcd_count) { 803 if (!pcd_count) {
799 next_request(1); 804 next_request(0);
800 return; 805 return;
801 } 806 }
802 807
@@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
815 return; 820 return;
816 } 821 }
817 pcd_bufblk = -1; 822 pcd_bufblk = -1;
818 next_request(0); 823 next_request(-EIO);
819 return; 824 return;
820 } 825 }
821 826
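Annotation: pcd keeps one static in-flight request (pcd_req); the pattern of its next_request() helper — finish the current chunk, drop the pointer only when __blk_end_request_cur() reports full completion, then kick the queue — recurs across the paride drivers. Reduced to a sketch (cur_req and kick_queue are illustrative):

static struct request *cur_req;         /* single in-flight request */

static void finish_chunk(struct request_queue *q, int err)
{
        if (!__blk_end_request_cur(cur_req, err))
                cur_req = NULL;         /* fully completed */
        kick_queue(q);                  /* hypothetical: rerun the request_fn */
}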
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9299455b0af6..bf5955b3d873 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -410,10 +410,12 @@ static void run_fsm(void)
410 pd_claimed = 0; 410 pd_claimed = 0;
411 phase = NULL; 411 phase = NULL;
412 spin_lock_irqsave(&pd_lock, saved_flags); 412 spin_lock_irqsave(&pd_lock, saved_flags);
413 end_request(pd_req, res); 413 if (!__blk_end_request_cur(pd_req,
414 pd_req = elv_next_request(pd_queue); 414 res == Ok ? 0 : -EIO)) {
415 if (!pd_req) 415 pd_req = blk_fetch_request(pd_queue);
416 stop = 1; 416 if (!pd_req)
417 stop = 1;
418 }
417 spin_unlock_irqrestore(&pd_lock, saved_flags); 419 spin_unlock_irqrestore(&pd_lock, saved_flags);
418 if (stop) 420 if (stop)
419 return; 421 return;
@@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
443 445
444 pd_cmd = rq_data_dir(pd_req); 446 pd_cmd = rq_data_dir(pd_req);
445 if (pd_cmd == READ || pd_cmd == WRITE) { 447 if (pd_cmd == READ || pd_cmd == WRITE) {
446 pd_block = pd_req->sector; 448 pd_block = blk_rq_pos(pd_req);
447 pd_count = pd_req->current_nr_sectors; 449 pd_count = blk_rq_cur_sectors(pd_req);
448 if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) 450 if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
449 return Fail; 451 return Fail;
450 pd_run = pd_req->nr_sectors; 452 pd_run = blk_rq_sectors(pd_req);
451 pd_buf = pd_req->buffer; 453 pd_buf = pd_req->buffer;
452 pd_retries = 0; 454 pd_retries = 0;
453 if (pd_cmd == READ) 455 if (pd_cmd == READ)
@@ -477,8 +479,8 @@ static int pd_next_buf(void)
477 if (pd_count) 479 if (pd_count)
478 return 0; 480 return 0;
479 spin_lock_irqsave(&pd_lock, saved_flags); 481 spin_lock_irqsave(&pd_lock, saved_flags);
480 end_request(pd_req, 1); 482 __blk_end_request_cur(pd_req, 0);
481 pd_count = pd_req->current_nr_sectors; 483 pd_count = blk_rq_cur_sectors(pd_req);
482 pd_buf = pd_req->buffer; 484 pd_buf = pd_req->buffer;
483 spin_unlock_irqrestore(&pd_lock, saved_flags); 485 spin_unlock_irqrestore(&pd_lock, saved_flags);
484 return 0; 486 return 0;
@@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
702{ 704{
703 if (pd_req) 705 if (pd_req)
704 return; 706 return;
705 pd_req = elv_next_request(q); 707 pd_req = blk_fetch_request(q);
706 if (!pd_req) 708 if (!pd_req)
707 return; 709 return;
708 710
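Annotation: pd's state machine makes the same point from the other direction: blk_fetch_request() is only called after __blk_end_request_cur() says the current request is done, so a request stays dequeued and owned by the FSM for its whole lifetime. Roughly (locking and names illustrative):

static DEFINE_SPINLOCK(fsm_lock);
static struct request *cur_req;

/* Returns nonzero when the queue is drained and the FSM should stop. */
static int fsm_finish_step(struct request_queue *q, int err)
{
        unsigned long flags;
        int stop = 0;

        spin_lock_irqsave(&fsm_lock, flags);
        if (!__blk_end_request_cur(cur_req, err)) {
                cur_req = blk_fetch_request(q); /* current one finished */
                if (!cur_req)
                        stop = 1;               /* nothing queued: park FSM */
        }
        spin_unlock_irqrestore(&fsm_lock, flags);
        return stop;
}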
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bef3b997ba3e..68a90834e993 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,12 +750,10 @@ static int pf_ready(void)
750 750
751static struct request_queue *pf_queue; 751static struct request_queue *pf_queue;
752 752
753static void pf_end_request(int uptodate) 753static void pf_end_request(int err)
754{ 754{
755 if (pf_req) { 755 if (pf_req && !__blk_end_request_cur(pf_req, err))
756 end_request(pf_req, uptodate);
757 pf_req = NULL; 756 pf_req = NULL;
758 }
759} 757}
760 758
761static void do_pf_request(struct request_queue * q) 759static void do_pf_request(struct request_queue * q)
@@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
763 if (pf_busy) 761 if (pf_busy)
764 return; 762 return;
765repeat: 763repeat:
766 pf_req = elv_next_request(q); 764 if (!pf_req) {
767 if (!pf_req) 765 pf_req = blk_fetch_request(q);
768 return; 766 if (!pf_req)
767 return;
768 }
769 769
770 pf_current = pf_req->rq_disk->private_data; 770 pf_current = pf_req->rq_disk->private_data;
771 pf_block = pf_req->sector; 771 pf_block = blk_rq_pos(pf_req);
772 pf_run = pf_req->nr_sectors; 772 pf_run = blk_rq_sectors(pf_req);
773 pf_count = pf_req->current_nr_sectors; 773 pf_count = blk_rq_cur_sectors(pf_req);
774 774
775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { 775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
776 pf_end_request(0); 776 pf_end_request(-EIO);
777 goto repeat; 777 goto repeat;
778 } 778 }
779 779
@@ -788,7 +788,7 @@ repeat:
788 pi_do_claimed(pf_current->pi, do_pf_write); 788 pi_do_claimed(pf_current->pi, do_pf_write);
789 else { 789 else {
790 pf_busy = 0; 790 pf_busy = 0;
791 pf_end_request(0); 791 pf_end_request(-EIO);
792 goto repeat; 792 goto repeat;
793 } 793 }
794} 794}
@@ -805,23 +805,22 @@ static int pf_next_buf(void)
805 return 1; 805 return 1;
806 if (!pf_count) { 806 if (!pf_count) {
807 spin_lock_irqsave(&pf_spin_lock, saved_flags); 807 spin_lock_irqsave(&pf_spin_lock, saved_flags);
808 pf_end_request(1); 808 pf_end_request(0);
809 pf_req = elv_next_request(pf_queue);
810 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 809 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
811 if (!pf_req) 810 if (!pf_req)
812 return 1; 811 return 1;
813 pf_count = pf_req->current_nr_sectors; 812 pf_count = blk_rq_cur_sectors(pf_req);
814 pf_buf = pf_req->buffer; 813 pf_buf = pf_req->buffer;
815 } 814 }
816 return 0; 815 return 0;
817} 816}
818 817
819static inline void next_request(int success) 818static inline void next_request(int err)
820{ 819{
821 unsigned long saved_flags; 820 unsigned long saved_flags;
822 821
823 spin_lock_irqsave(&pf_spin_lock, saved_flags); 822 spin_lock_irqsave(&pf_spin_lock, saved_flags);
824 pf_end_request(success); 823 pf_end_request(err);
825 pf_busy = 0; 824 pf_busy = 0;
826 do_pf_request(pf_queue); 825 do_pf_request(pf_queue);
827 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 826 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@@ -844,7 +843,7 @@ static void do_pf_read_start(void)
844 pi_do_claimed(pf_current->pi, do_pf_read_start); 843 pi_do_claimed(pf_current->pi, do_pf_read_start);
845 return; 844 return;
846 } 845 }
847 next_request(0); 846 next_request(-EIO);
848 return; 847 return;
849 } 848 }
850 pf_mask = STAT_DRQ; 849 pf_mask = STAT_DRQ;
@@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
863 pi_do_claimed(pf_current->pi, do_pf_read_start); 862 pi_do_claimed(pf_current->pi, do_pf_read_start);
864 return; 863 return;
865 } 864 }
866 next_request(0); 865 next_request(-EIO);
867 return; 866 return;
868 } 867 }
869 pi_read_block(pf_current->pi, pf_buf, 512); 868 pi_read_block(pf_current->pi, pf_buf, 512);
@@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
871 break; 870 break;
872 } 871 }
873 pi_disconnect(pf_current->pi); 872 pi_disconnect(pf_current->pi);
874 next_request(1); 873 next_request(0);
875} 874}
876 875
877static void do_pf_write(void) 876static void do_pf_write(void)
@@ -890,7 +889,7 @@ static void do_pf_write_start(void)
890 pi_do_claimed(pf_current->pi, do_pf_write_start); 889 pi_do_claimed(pf_current->pi, do_pf_write_start);
891 return; 890 return;
892 } 891 }
893 next_request(0); 892 next_request(-EIO);
894 return; 893 return;
895 } 894 }
896 895
@@ -903,7 +902,7 @@ static void do_pf_write_start(void)
903 pi_do_claimed(pf_current->pi, do_pf_write_start); 902 pi_do_claimed(pf_current->pi, do_pf_write_start);
904 return; 903 return;
905 } 904 }
906 next_request(0); 905 next_request(-EIO);
907 return; 906 return;
908 } 907 }
909 pi_write_block(pf_current->pi, pf_buf, 512); 908 pi_write_block(pf_current->pi, pf_buf, 512);
@@ -923,11 +922,11 @@ static void do_pf_write_done(void)
923 pi_do_claimed(pf_current->pi, do_pf_write_start); 922 pi_do_claimed(pf_current->pi, do_pf_write_start);
924 return; 923 return;
925 } 924 }
926 next_request(0); 925 next_request(-EIO);
927 return; 926 return;
928 } 927 }
929 pi_disconnect(pf_current->pi); 928 pi_disconnect(pf_current->pi);
930 next_request(1); 929 next_request(0);
931} 930}
932 931
933static int __init pf_init(void) 932static int __init pf_init(void)
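Annotation: also visible throughout pf (and the other conversions): the old end_request() took a 0/1 "uptodate" flag, while the new completion helpers take errno conventions, so every call site flips polarity. The mapping, side by side (old calls shown only for contrast):

end_request(req, 1);                    /* old API: 1 == success */
end_request(req, 0);                    /* old API: 0 == failure */

__blk_end_request_cur(req, 0);          /* new API: 0 == success */
__blk_end_request_cur(req, -EIO);       /* new API: negative errno == failure */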
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index bccc42bb9212..338cee4cc0ba 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
134 rq_for_each_segment(bv, req, iter) 134 rq_for_each_segment(bv, req, iter)
135 n++; 135 n++;
136 dev_dbg(&dev->sbd.core, 136 dev_dbg(&dev->sbd.core,
137 "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n", 137 "%s:%u: %s req has %u bvecs for %u sectors\n",
138 __func__, __LINE__, op, n, req->nr_sectors, 138 __func__, __LINE__, op, n, blk_rq_sectors(req));
139 req->hard_nr_sectors);
140#endif 139#endif
141 140
142 start_sector = req->sector * priv->blocking_factor; 141 start_sector = blk_rq_pos(req) * priv->blocking_factor;
143 sectors = req->nr_sectors * priv->blocking_factor; 142 sectors = blk_rq_sectors(req) * priv->blocking_factor;
144 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n", 143 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
145 __func__, __LINE__, op, sectors, start_sector); 144 __func__, __LINE__, op, sectors, start_sector);
146 145
@@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
158 if (res) { 157 if (res) {
159 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, 158 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
160 __LINE__, op, res); 159 __LINE__, op, res);
161 end_request(req, 0); 160 __blk_end_request_all(req, -EIO);
162 return 0; 161 return 0;
163 } 162 }
164 163
@@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
180 if (res) { 179 if (res) {
181 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", 180 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
182 __func__, __LINE__, res); 181 __func__, __LINE__, res);
183 end_request(req, 0); 182 __blk_end_request_all(req, -EIO);
184 return 0; 183 return 0;
185 } 184 }
186 185
@@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
195 194
196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 195 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
197 196
198 while ((req = elv_next_request(q))) { 197 while ((req = blk_fetch_request(q))) {
199 if (blk_fs_request(req)) { 198 if (blk_fs_request(req)) {
200 if (ps3disk_submit_request_sg(dev, req)) 199 if (ps3disk_submit_request_sg(dev, req))
201 break; 200 break;
@@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
205 break; 204 break;
206 } else { 205 } else {
207 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); 206 blk_dump_rq_flags(req, DEVICE_NAME " bad request");
208 end_request(req, 0); 207 __blk_end_request_all(req, -EIO);
209 continue; 208 continue;
210 } 209 }
211 } 210 }
@@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
231 struct request *req; 230 struct request *req;
232 int res, read, error; 231 int res, read, error;
233 u64 tag, status; 232 u64 tag, status;
234 unsigned long num_sectors;
235 const char *op; 233 const char *op;
236 234
237 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); 235 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
261 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 259 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
262 req->cmd[0] == REQ_LB_OP_FLUSH) { 260 req->cmd[0] == REQ_LB_OP_FLUSH) {
263 read = 0; 261 read = 0;
264 num_sectors = req->hard_cur_sectors;
265 op = "flush"; 262 op = "flush";
266 } else { 263 } else {
267 read = !rq_data_dir(req); 264 read = !rq_data_dir(req);
268 num_sectors = req->nr_sectors;
269 op = read ? "read" : "write"; 265 op = read ? "read" : "write";
270 } 266 }
271 if (status) { 267 if (status) {
@@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
281 } 277 }
282 278
283 spin_lock(&priv->lock); 279 spin_lock(&priv->lock);
284 __blk_end_request(req, error, num_sectors << 9); 280 __blk_end_request_all(req, error);
285 priv->req = NULL; 281 priv->req = NULL;
286 ps3disk_do_request(dev, priv->queue); 282 ps3disk_do_request(dev, priv->queue);
287 spin_unlock(&priv->lock); 283 spin_unlock(&priv->lock);
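Annotation: ps3disk shows why __blk_end_request_all() exists: the interrupt handler previously had to carry num_sectors from submission time just to pass a byte count to __blk_end_request(). Completing "everything outstanding" removes that bookkeeping:

/* Before: size had to be remembered from submission to completion. */
__blk_end_request(req, error, num_sectors << 9);

/* After: complete whatever remains of the request, no size argument. */
__blk_end_request_all(req, error);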
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5861e33efe63..cbfd9c0aef03 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD); 212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
213} 213}
214 214
215static void vdc_end_request(struct request *req, int error, int num_sectors)
216{
217 __blk_end_request(req, error, num_sectors << 9);
218}
219
220static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, 215static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
221 unsigned int index) 216 unsigned int index)
222{ 217{
@@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
239 234
240 rqe->req = NULL; 235 rqe->req = NULL;
241 236
242 vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9); 237 __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
243 238
244 if (blk_queue_stopped(port->disk->queue)) 239 if (blk_queue_stopped(port->disk->queue))
245 blk_start_queue(port->disk->queue); 240 blk_start_queue(port->disk->queue);
@@ -421,7 +416,7 @@ static int __send_request(struct request *req)
421 desc->slice = 0; 416 desc->slice = 0;
422 } 417 }
423 desc->status = ~0; 418 desc->status = ~0;
424 desc->offset = (req->sector << 9) / port->vdisk_block_size; 419 desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
425 desc->size = len; 420 desc->size = len;
426 desc->ncookies = err; 421 desc->ncookies = err;
427 422
@@ -446,14 +441,13 @@ out:
446static void do_vdc_request(struct request_queue *q) 441static void do_vdc_request(struct request_queue *q)
447{ 442{
448 while (1) { 443 while (1) {
449 struct request *req = elv_next_request(q); 444 struct request *req = blk_fetch_request(q);
450 445
451 if (!req) 446 if (!req)
452 break; 447 break;
453 448
454 blkdev_dequeue_request(req);
455 if (__send_request(req) < 0) 449 if (__send_request(req) < 0)
456 vdc_end_request(req, -EIO, req->hard_nr_sectors); 450 __blk_end_request_all(req, -EIO);
457 } 451 }
458} 452}
459 453
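Annotation: sunvdc drops its vdc_end_request() wrapper because the new completion API already speaks bytes: desc->size is a byte count, so the old dance of shifting it down to sectors only for the wrapper to shift it back up cancels out to a direct call:

/* Old: two shifts that cancel out, via a wrapper taking sectors. */
vdc_end_request(req, err, desc->size >> 9);     /* wrapper did << 9 again */

/* New: pass the byte count straight through. */
__blk_end_request(req, err, desc->size);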
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index d22cc3856937..cf7877fb8a7d 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
514 ret = swim_read_sector(fs, side, track, sector, 514 ret = swim_read_sector(fs, side, track, sector,
515 buffer); 515 buffer);
516 if (try-- == 0) 516 if (try-- == 0)
517 return -1; 517 return -EIO;
518 } while (ret != 512); 518 } while (ret != 512);
519 519
520 buffer += ret; 520 buffer += ret;
@@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
528 struct request *req; 528 struct request *req;
529 struct floppy_state *fs; 529 struct floppy_state *fs;
530 530
531 while ((req = elv_next_request(q))) { 531 req = blk_fetch_request(q);
532 while (req) {
533 int err = -EIO;
532 534
533 fs = req->rq_disk->private_data; 535 fs = req->rq_disk->private_data;
534 if (req->sector < 0 || req->sector >= fs->total_secs) { 536 if (blk_rq_pos(req) >= fs->total_secs)
535 end_request(req, 0); 537 goto done;
536 continue; 538 if (!fs->disk_in)
537 } 539 goto done;
538 if (req->current_nr_sectors == 0) { 540 if (rq_data_dir(req) == WRITE && fs->write_protected)
539 end_request(req, 1); 541 goto done;
540 continue; 542
541 }
542 if (!fs->disk_in) {
543 end_request(req, 0);
544 continue;
545 }
546 if (rq_data_dir(req) == WRITE) {
547 if (fs->write_protected) {
548 end_request(req, 0);
549 continue;
550 }
551 }
552 switch (rq_data_dir(req)) { 543 switch (rq_data_dir(req)) {
553 case WRITE: 544 case WRITE:
554 /* NOT IMPLEMENTED */ 545 /* NOT IMPLEMENTED */
555 end_request(req, 0);
556 break; 546 break;
557 case READ: 547 case READ:
558 if (floppy_read_sectors(fs, req->sector, 548 err = floppy_read_sectors(fs, blk_rq_pos(req),
559 req->current_nr_sectors, 549 blk_rq_cur_sectors(req),
560 req->buffer)) { 550 req->buffer);
561 end_request(req, 0);
562 continue;
563 }
564 req->nr_sectors -= req->current_nr_sectors;
565 req->sector += req->current_nr_sectors;
566 req->buffer += req->current_nr_sectors * 512;
567 end_request(req, 1);
568 break; 551 break;
569 } 552 }
553 done:
554 if (!__blk_end_request_cur(req, err))
555 req = blk_fetch_request(q);
570 } 556 }
571} 557}
572 558
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 612965307ba0..80df93e3cdd0 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
251static int floppy_check_change(struct gendisk *disk); 251static int floppy_check_change(struct gendisk *disk);
252static int floppy_revalidate(struct gendisk *disk); 252static int floppy_revalidate(struct gendisk *disk);
253 253
254static bool swim3_end_request(int err, unsigned int nr_bytes)
255{
256 if (__blk_end_request(fd_req, err, nr_bytes))
257 return true;
258
259 fd_req = NULL;
260 return false;
261}
262
263static bool swim3_end_request_cur(int err)
264{
265 return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
266}
267
254static void swim3_select(struct floppy_state *fs, int sel) 268static void swim3_select(struct floppy_state *fs, int sel)
255{ 269{
256 struct swim3 __iomem *sw = fs->swim3; 270 struct swim3 __iomem *sw = fs->swim3;
@@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
310 wake_up(&fs->wait); 324 wake_up(&fs->wait);
311 return; 325 return;
312 } 326 }
313 while (fs->state == idle && (req = elv_next_request(swim3_queue))) { 327 while (fs->state == idle) {
328 if (!fd_req) {
329 fd_req = blk_fetch_request(swim3_queue);
330 if (!fd_req)
331 break;
332 }
333 req = fd_req;
314#if 0 334#if 0
315 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n", 335 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
316 req->rq_disk->disk_name, req->cmd, 336 req->rq_disk->disk_name, req->cmd,
317 (long)req->sector, req->nr_sectors, req->buffer); 337 (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
318 printk(" errors=%d current_nr_sectors=%ld\n", 338 printk(" errors=%d current_nr_sectors=%u\n",
319 req->errors, req->current_nr_sectors); 339 req->errors, blk_rq_cur_sectors(req));
320#endif 340#endif
321 341
322 if (req->sector < 0 || req->sector >= fs->total_secs) { 342 if (blk_rq_pos(req) >= fs->total_secs) {
323 end_request(req, 0); 343 swim3_end_request_cur(-EIO);
324 continue;
325 }
326 if (req->current_nr_sectors == 0) {
327 end_request(req, 1);
328 continue; 344 continue;
329 } 345 }
330 if (fs->ejected) { 346 if (fs->ejected) {
331 end_request(req, 0); 347 swim3_end_request_cur(-EIO);
332 continue; 348 continue;
333 } 349 }
334 350
@@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
336 if (fs->write_prot < 0) 352 if (fs->write_prot < 0)
337 fs->write_prot = swim3_readbit(fs, WRITE_PROT); 353 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
338 if (fs->write_prot) { 354 if (fs->write_prot) {
339 end_request(req, 0); 355 swim3_end_request_cur(-EIO);
340 continue; 356 continue;
341 } 357 }
342 } 358 }
343 359
344 /* Do not remove the cast. req->sector is now a sector_t and 360 /* Do not remove the cast. blk_rq_pos(req) is now a
345 * can be 64 bits, but it will never go past 32 bits for this 361 * sector_t and can be 64 bits, but it will never go
346 * driver anyway, so we can safely cast it down and not have 362 * past 32 bits for this driver anyway, so we can
347 * to do a 64/32 division 363 * safely cast it down and not have to do a 64/32
364 * division
348 */ 365 */
349 fs->req_cyl = ((long)req->sector) / fs->secpercyl; 366 fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
350 x = ((long)req->sector) % fs->secpercyl; 367 x = ((long)blk_rq_pos(req)) % fs->secpercyl;
351 fs->head = x / fs->secpertrack; 368 fs->head = x / fs->secpertrack;
352 fs->req_sector = x % fs->secpertrack + 1; 369 fs->req_sector = x % fs->secpertrack + 1;
353 fd_req = req; 370 fd_req = req;
@@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
424 struct dbdma_cmd *cp = fs->dma_cmd; 441 struct dbdma_cmd *cp = fs->dma_cmd;
425 struct dbdma_regs __iomem *dr = fs->dma; 442 struct dbdma_regs __iomem *dr = fs->dma;
426 443
427 if (fd_req->current_nr_sectors <= 0) { 444 if (blk_rq_cur_sectors(fd_req) <= 0) {
428 printk(KERN_ERR "swim3: transfer 0 sectors?\n"); 445 printk(KERN_ERR "swim3: transfer 0 sectors?\n");
429 return; 446 return;
430 } 447 }
@@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
432 n = 1; 449 n = 1;
433 else { 450 else {
434 n = fs->secpertrack - fs->req_sector + 1; 451 n = fs->secpertrack - fs->req_sector + 1;
435 if (n > fd_req->current_nr_sectors) 452 if (n > blk_rq_cur_sectors(fd_req))
436 n = fd_req->current_nr_sectors; 453 n = blk_rq_cur_sectors(fd_req);
437 } 454 }
438 fs->scount = n; 455 fs->scount = n;
439 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0); 456 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
508 case do_transfer: 525 case do_transfer:
509 if (fs->cur_cyl != fs->req_cyl) { 526 if (fs->cur_cyl != fs->req_cyl) {
510 if (fs->retries > 5) { 527 if (fs->retries > 5) {
511 end_request(fd_req, 0); 528 swim3_end_request_cur(-EIO);
512 fs->state = idle; 529 fs->state = idle;
513 return; 530 return;
514 } 531 }
@@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
540 out_8(&sw->intr_enable, 0); 557 out_8(&sw->intr_enable, 0);
541 fs->cur_cyl = -1; 558 fs->cur_cyl = -1;
542 if (fs->retries > 5) { 559 if (fs->retries > 5) {
543 end_request(fd_req, 0); 560 swim3_end_request_cur(-EIO);
544 fs->state = idle; 561 fs->state = idle;
545 start_request(fs); 562 start_request(fs);
546 } else { 563 } else {
@@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
559 out_8(&sw->select, RELAX); 576 out_8(&sw->select, RELAX);
560 out_8(&sw->intr_enable, 0); 577 out_8(&sw->intr_enable, 0);
561 printk(KERN_ERR "swim3: seek timeout\n"); 578 printk(KERN_ERR "swim3: seek timeout\n");
562 end_request(fd_req, 0); 579 swim3_end_request_cur(-EIO);
563 fs->state = idle; 580 fs->state = idle;
564 start_request(fs); 581 start_request(fs);
565} 582}
@@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
583 return; 600 return;
584 } 601 }
585 printk(KERN_ERR "swim3: seek settle timeout\n"); 602 printk(KERN_ERR "swim3: seek settle timeout\n");
586 end_request(fd_req, 0); 603 swim3_end_request_cur(-EIO);
587 fs->state = idle; 604 fs->state = idle;
588 start_request(fs); 605 start_request(fs);
589} 606}
@@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
593 struct floppy_state *fs = (struct floppy_state *) data; 610 struct floppy_state *fs = (struct floppy_state *) data;
594 struct swim3 __iomem *sw = fs->swim3; 611 struct swim3 __iomem *sw = fs->swim3;
595 struct dbdma_regs __iomem *dr = fs->dma; 612 struct dbdma_regs __iomem *dr = fs->dma;
596 struct dbdma_cmd *cp = fs->dma_cmd;
597 unsigned long s;
598 int n; 613 int n;
599 614
600 fs->timeout_pending = 0; 615 fs->timeout_pending = 0;
@@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
605 out_8(&sw->intr_enable, 0); 620 out_8(&sw->intr_enable, 0);
606 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 621 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
607 out_8(&sw->select, RELAX); 622 out_8(&sw->select, RELAX);
608 if (rq_data_dir(fd_req) == WRITE)
609 ++cp;
610 if (ld_le16(&cp->xfer_status) != 0)
611 s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
612 else
613 s = 0;
614 fd_req->sector += s;
615 fd_req->current_nr_sectors -= s;
616 printk(KERN_ERR "swim3: timeout %sing sector %ld\n", 623 printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
617 (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); 624 (rq_data_dir(fd_req)==WRITE? "writ": "read"),
618 end_request(fd_req, 0); 625 (long)blk_rq_pos(fd_req));
626 swim3_end_request_cur(-EIO);
619 fs->state = idle; 627 fs->state = idle;
620 start_request(fs); 628 start_request(fs);
621} 629}
@@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
646 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); 654 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
647 fs->cur_cyl = -1; 655 fs->cur_cyl = -1;
648 if (fs->retries > 5) { 656 if (fs->retries > 5) {
649 end_request(fd_req, 0); 657 swim3_end_request_cur(-EIO);
650 fs->state = idle; 658 fs->state = idle;
651 start_request(fs); 659 start_request(fs);
652 } else { 660 } else {
@@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
719 if (intr & ERROR_INTR) { 727 if (intr & ERROR_INTR) {
720 n = fs->scount - 1 - resid / 512; 728 n = fs->scount - 1 - resid / 512;
721 if (n > 0) { 729 if (n > 0) {
722 fd_req->sector += n; 730 blk_update_request(fd_req, 0, n << 9);
723 fd_req->current_nr_sectors -= n;
724 fd_req->buffer += n * 512;
725 fs->req_sector += n; 731 fs->req_sector += n;
726 } 732 }
727 if (fs->retries < 5) { 733 if (fs->retries < 5) {
@@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
730 } else { 736 } else {
731 printk("swim3: error %sing block %ld (err=%x)\n", 737 printk("swim3: error %sing block %ld (err=%x)\n",
732 rq_data_dir(fd_req) == WRITE? "writ": "read", 738 rq_data_dir(fd_req) == WRITE? "writ": "read",
733 (long)fd_req->sector, err); 739 (long)blk_rq_pos(fd_req), err);
734 end_request(fd_req, 0); 740 swim3_end_request_cur(-EIO);
735 fs->state = idle; 741 fs->state = idle;
736 } 742 }
737 } else { 743 } else {
@@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
740 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); 746 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
741 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", 747 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
742 fs->state, rq_data_dir(fd_req), intr, err); 748 fs->state, rq_data_dir(fd_req), intr, err);
743 end_request(fd_req, 0); 749 swim3_end_request_cur(-EIO);
744 fs->state = idle; 750 fs->state = idle;
745 start_request(fs); 751 start_request(fs);
746 break; 752 break;
747 } 753 }
748 fd_req->sector += fs->scount; 754 if (swim3_end_request(0, fs->scount << 9)) {
749 fd_req->current_nr_sectors -= fs->scount;
750 fd_req->buffer += fs->scount * 512;
751 if (fd_req->current_nr_sectors <= 0) {
752 end_request(fd_req, 1);
753 fs->state = idle;
754 } else {
755 fs->req_sector += fs->scount; 755 fs->req_sector += fs->scount;
756 if (fs->req_sector > fs->secpertrack) { 756 if (fs->req_sector > fs->secpertrack) {
757 fs->req_sector -= fs->secpertrack; 757 fs->req_sector -= fs->secpertrack;
@@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
761 } 761 }
762 } 762 }
763 act(fs); 763 act(fs);
764 } 764 } else
765 fs->state = idle;
765 } 766 }
766 if (fs->state == idle) 767 if (fs->state == idle)
767 start_request(fs); 768 start_request(fs);
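Annotation: swim3 adds the third tool of the new API: blk_update_request(), which advances a request's position and buffer by n sectors without completing it toward the queue, replacing the manual fd_req->sector/current_nr_sectors/buffer arithmetic in the retry path after a partial transfer. (__blk_end_request() is essentially blk_update_request() plus the finish step once nothing remains.)

/* Account for n good sectors after a failed multi-sector transfer,
 * then leave the request in flight so the tail can be retried. */
if (n > 0)
        blk_update_request(fd_req, 0, n << 9);  /* 0 == no error */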
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ff0448e4bf03..da403b6a7f43 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
749 struct request *req = crq->rq; 749 struct request *req = crq->rq;
750 int rc; 750 int rc;
751 751
752 rc = __blk_end_request(req, error, blk_rq_bytes(req)); 752 __blk_end_request_all(req, error);
753 assert(rc == 0);
754 753
755 rc = carm_put_request(host, crq); 754 rc = carm_put_request(host, crq);
756 assert(rc == 0); 755 assert(rc == 0);
@@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
811 810
812 while (1) { 811 while (1) {
813 DPRINTK("get req\n"); 812 DPRINTK("get req\n");
814 rq = elv_next_request(q); 813 rq = blk_fetch_request(q);
815 if (!rq) 814 if (!rq)
816 break; 815 break;
817 816
818 blkdev_dequeue_request(rq);
819
820 crq = rq->special; 817 crq = rq->special;
821 assert(crq != NULL); 818 assert(crq != NULL);
822 assert(crq->rq == rq); 819 assert(crq->rq == rq);
@@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
847 844
848queue_one_request: 845queue_one_request:
849 VPRINTK("get req\n"); 846 VPRINTK("get req\n");
850 rq = elv_next_request(q); 847 rq = blk_peek_request(q);
851 if (!rq) 848 if (!rq)
852 return; 849 return;
853 850
@@ -858,7 +855,7 @@ queue_one_request:
858 } 855 }
859 crq->rq = rq; 856 crq->rq = rq;
860 857
861 blkdev_dequeue_request(rq); 858 blk_start_request(rq);
862 859
863 if (rq_data_dir(rq) == WRITE) { 860 if (rq_data_dir(rq) == WRITE) {
864 writing = 1; 861 writing = 1;
@@ -904,10 +901,10 @@ queue_one_request:
904 msg->sg_count = n_elem; 901 msg->sg_count = n_elem;
905 msg->sg_type = SGT_32BIT; 902 msg->sg_type = SGT_32BIT;
906 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); 903 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
907 msg->lba = cpu_to_le32(rq->sector & 0xffffffff); 904 msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
908 tmp = (rq->sector >> 16) >> 16; 905 tmp = (blk_rq_pos(rq) >> 16) >> 16;
909 msg->lba_high = cpu_to_le16( (u16) tmp ); 906 msg->lba_high = cpu_to_le16( (u16) tmp );
910 msg->lba_count = cpu_to_le16(rq->nr_sectors); 907 msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
911 908
912 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); 909 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
913 for (i = 0; i < n_elem; i++) { 910 for (i = 0; i < n_elem; i++) {
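sx8 keeps peeking and dequeueing separate because carm_rq_fn() can run out of host command slots mid-loop. A minimal sketch of that two-step pattern; my_cmd_alloc() and my_cmd_issue() are hypothetical stand-ins for the driver's own helpers:

static void sketch_rq_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                void *cmd = my_cmd_alloc();     /* hypothetical slot alloc */

                if (!cmd)
                        return; /* rq stays queued and is retried later */

                blk_start_request(rq);  /* commit: rq is dequeued, in flight */
                my_cmd_issue(cmd, rq);  /* hypothetical issue helper */
        }
}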
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 689cd27ac890..e67bbae9547d 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
360static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 360static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
361 struct ub_scsi_cmd *cmd, struct ub_request *urq); 361 struct ub_scsi_cmd *cmd, struct ub_request *urq);
362static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 362static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
363static void ub_end_rq(struct request *rq, unsigned int status, 363static void ub_end_rq(struct request *rq, unsigned int status);
364 unsigned int cmd_len);
365static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 364static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
366 struct ub_request *urq, struct ub_scsi_cmd *cmd); 365 struct ub_request *urq, struct ub_scsi_cmd *cmd);
367static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 366static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
627 struct ub_lun *lun = q->queuedata; 626 struct ub_lun *lun = q->queuedata;
628 struct request *rq; 627 struct request *rq;
629 628
630 while ((rq = elv_next_request(q)) != NULL) { 629 while ((rq = blk_peek_request(q)) != NULL) {
631 if (ub_request_fn_1(lun, rq) != 0) { 630 if (ub_request_fn_1(lun, rq) != 0) {
632 blk_stop_queue(q); 631 blk_stop_queue(q);
633 break; 632 break;
@@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
643 int n_elem; 642 int n_elem;
644 643
645 if (atomic_read(&sc->poison)) { 644 if (atomic_read(&sc->poison)) {
646 blkdev_dequeue_request(rq); 645 blk_start_request(rq);
647 ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq)); 646 ub_end_rq(rq, DID_NO_CONNECT << 16);
648 return 0; 647 return 0;
649 } 648 }
650 649
651 if (lun->changed && !blk_pc_request(rq)) { 650 if (lun->changed && !blk_pc_request(rq)) {
652 blkdev_dequeue_request(rq); 651 blk_start_request(rq);
653 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq)); 652 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
654 return 0; 653 return 0;
655 } 654 }
656 655
@@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
660 return -1; 659 return -1;
661 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 660 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
662 661
663 blkdev_dequeue_request(rq); 662 blk_start_request(rq);
664 663
665 urq = &lun->urq; 664 urq = &lun->urq;
666 memset(urq, 0, sizeof(struct ub_request)); 665 memset(urq, 0, sizeof(struct ub_request));
@@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
702 701
703drop: 702drop:
704 ub_put_cmd(lun, cmd); 703 ub_put_cmd(lun, cmd);
705 ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq)); 704 ub_end_rq(rq, DID_ERROR << 16);
706 return 0; 705 return 0;
707} 706}
708 707
@@ -726,8 +725,8 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
726 * The call to blk_queue_hardsect_size() guarantees that the request 725 * The call to blk_queue_hardsect_size() guarantees that the request
727 * is aligned, but it is given in terms of 512 byte units, always. 726 * is aligned, but it is given in terms of 512 byte units, always.
728 */ 727 */
729 block = rq->sector >> lun->capacity.bshift; 728 block = blk_rq_pos(rq) >> lun->capacity.bshift;
730 nblks = rq->nr_sectors >> lun->capacity.bshift; 729 nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
731 730
732 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; 731 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
733 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ 732 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
739 cmd->cdb[8] = nblks; 738 cmd->cdb[8] = nblks;
740 cmd->cdb_len = 10; 739 cmd->cdb_len = 10;
741 740
742 cmd->len = rq->nr_sectors * 512; 741 cmd->len = blk_rq_bytes(rq);
743} 742}
744 743
745static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 744static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
747{ 746{
748 struct request *rq = urq->rq; 747 struct request *rq = urq->rq;
749 748
750 if (rq->data_len == 0) { 749 if (blk_rq_bytes(rq) == 0) {
751 cmd->dir = UB_DIR_NONE; 750 cmd->dir = UB_DIR_NONE;
752 } else { 751 } else {
753 if (rq_data_dir(rq) == WRITE) 752 if (rq_data_dir(rq) == WRITE)
@@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
762 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 761 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
763 cmd->cdb_len = rq->cmd_len; 762 cmd->cdb_len = rq->cmd_len;
764 763
765 cmd->len = rq->data_len; 764 cmd->len = blk_rq_bytes(rq);
766 765
767 /* 766 /*
768 * To reapply this to every URB is not as incorrect as it looks. 767 * To reapply this to every URB is not as incorrect as it looks.
@@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
777 struct ub_request *urq = cmd->back; 776 struct ub_request *urq = cmd->back;
778 struct request *rq; 777 struct request *rq;
779 unsigned int scsi_status; 778 unsigned int scsi_status;
780 unsigned int cmd_len;
781 779
782 rq = urq->rq; 780 rq = urq->rq;
783 781
784 if (cmd->error == 0) { 782 if (cmd->error == 0) {
785 if (blk_pc_request(rq)) { 783 if (blk_pc_request(rq)) {
786 if (cmd->act_len >= rq->data_len) 784 if (cmd->act_len >= rq->resid_len)
787 rq->data_len = 0; 785 rq->resid_len = 0;
788 else 786 else
789 rq->data_len -= cmd->act_len; 787 rq->resid_len -= cmd->act_len;
790 scsi_status = 0; 788 scsi_status = 0;
791 } else { 789 } else {
792 if (cmd->act_len != cmd->len) { 790 if (cmd->act_len != cmd->len) {
@@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
818 816
819 urq->rq = NULL; 817 urq->rq = NULL;
820 818
821 cmd_len = cmd->len;
822 ub_put_cmd(lun, cmd); 819 ub_put_cmd(lun, cmd);
823 ub_end_rq(rq, scsi_status, cmd_len); 820 ub_end_rq(rq, scsi_status);
824 blk_start_queue(lun->disk->queue); 821 blk_start_queue(lun->disk->queue);
825} 822}
826 823
827static void ub_end_rq(struct request *rq, unsigned int scsi_status, 824static void ub_end_rq(struct request *rq, unsigned int scsi_status)
828 unsigned int cmd_len)
829{ 825{
830 int error; 826 int error;
831 long rqlen;
832 827
833 if (scsi_status == 0) { 828 if (scsi_status == 0) {
834 error = 0; 829 error = 0;
@@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
836 error = -EIO; 831 error = -EIO;
837 rq->errors = scsi_status; 832 rq->errors = scsi_status;
838 } 833 }
839 rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */ 834 __blk_end_request_all(rq, error);
840 if (__blk_end_request(rq, error, cmd_len)) {
841 printk(KERN_WARNING DRV_NAME
842 ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
843 blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
844 }
845} 835}
846 836
847static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 837static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
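Two things change shape in ub above: completion is now always whole-request, and for packet commands the residual byte count travels in rq->resid_len instead of being folded back into rq->data_len. A sketch of why the warning path could go away (editor's note, not part of the patch):

/*
 * Old: complete exactly cmd_len bytes; a true return meant the request
 * was not fully ended, which ub treated as a driver bug and logged.
 *
 *      if (__blk_end_request(rq, error, cmd_len))
 *              printk(KERN_WARNING ...);
 *
 * New: always finishes the request, so there is nothing left to check;
 * any residue was already recorded in rq->resid_len by ub_rw_cmd_done().
 *
 *      __blk_end_request_all(rq, error);
 */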
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ecccf65dce2f..390d69bb7c48 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
252 struct viodasd_device *d; 252 struct viodasd_device *d;
253 unsigned long flags; 253 unsigned long flags;
254 254
255 start = (u64)req->sector << 9; 255 start = (u64)blk_rq_pos(req) << 9;
256 256
257 if (rq_data_dir(req) == READ) { 257 if (rq_data_dir(req) == READ) {
258 direction = DMA_FROM_DEVICE; 258 direction = DMA_FROM_DEVICE;
@@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
361 * back later. 361 * back later.
362 */ 362 */
363 while (num_req_outstanding < VIOMAXREQ) { 363 while (num_req_outstanding < VIOMAXREQ) {
364 req = elv_next_request(q); 364 req = blk_fetch_request(q);
365 if (req == NULL) 365 if (req == NULL)
366 return; 366 return;
367 /* dequeue the current request from the queue */
368 blkdev_dequeue_request(req);
369 /* check that request contains a valid command */ 367 /* check that request contains a valid command */
370 if (!blk_fs_request(req)) { 368 if (!blk_fs_request(req)) {
371 viodasd_end_request(req, -EIO, req->hard_nr_sectors); 369 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
372 continue; 370 continue;
373 } 371 }
374 /* Try sending the request */ 372 /* Try sending the request */
375 if (send_request(req) != 0) 373 if (send_request(req) != 0)
376 viodasd_end_request(req, -EIO, req->hard_nr_sectors); 374 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
377 } 375 }
378} 376}
379 377
@@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
590 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result); 588 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
591 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n", 589 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
592 event->xRc, bevent->sub_result, err->msg); 590 event->xRc, bevent->sub_result, err->msg);
593 num_sect = req->hard_nr_sectors; 591 num_sect = blk_rq_sectors(req);
594 } 592 }
595 qlock = req->q->queue_lock; 593 qlock = req->q->queue_lock;
596 spin_lock_irqsave(qlock, irq_flags); 594 spin_lock_irqsave(qlock, irq_flags);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5d34764c8a87..511d4ae2d176 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,7 @@ struct virtblk_req
37 struct list_head list; 37 struct list_head list;
38 struct request *req; 38 struct request *req;
39 struct virtio_blk_outhdr out_hdr; 39 struct virtio_blk_outhdr out_hdr;
40 struct virtio_scsi_inhdr in_hdr;
40 u8 status; 41 u8 status;
41}; 42};
42 43
@@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
50 spin_lock_irqsave(&vblk->lock, flags); 51 spin_lock_irqsave(&vblk->lock, flags);
51 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { 52 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
52 int error; 53 int error;
54
53 switch (vbr->status) { 55 switch (vbr->status) {
54 case VIRTIO_BLK_S_OK: 56 case VIRTIO_BLK_S_OK:
55 error = 0; 57 error = 0;
@@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
62 break; 64 break;
63 } 65 }
64 66
65 __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req)); 67 if (blk_pc_request(vbr->req)) {
68 vbr->req->resid_len = vbr->in_hdr.residual;
69 vbr->req->sense_len = vbr->in_hdr.sense_len;
70 vbr->req->errors = vbr->in_hdr.errors;
71 }
72
73 __blk_end_request_all(vbr->req, error);
66 list_del(&vbr->list); 74 list_del(&vbr->list);
67 mempool_free(vbr, vblk->pool); 75 mempool_free(vbr, vblk->pool);
68 } 76 }
@@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
74static bool do_req(struct request_queue *q, struct virtio_blk *vblk, 82static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
75 struct request *req) 83 struct request *req)
76{ 84{
77 unsigned long num, out, in; 85 unsigned long num, out = 0, in = 0;
78 struct virtblk_req *vbr; 86 struct virtblk_req *vbr;
79 87
80 vbr = mempool_alloc(vblk->pool, GFP_ATOMIC); 88 vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
85 vbr->req = req; 93 vbr->req = req;
86 if (blk_fs_request(vbr->req)) { 94 if (blk_fs_request(vbr->req)) {
87 vbr->out_hdr.type = 0; 95 vbr->out_hdr.type = 0;
88 vbr->out_hdr.sector = vbr->req->sector; 96 vbr->out_hdr.sector = blk_rq_pos(vbr->req);
89 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 97 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
90 } else if (blk_pc_request(vbr->req)) { 98 } else if (blk_pc_request(vbr->req)) {
91 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 99 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
99 if (blk_barrier_rq(vbr->req)) 107 if (blk_barrier_rq(vbr->req))
100 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; 108 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
101 109
102 sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); 110 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
103 num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
104 sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
105 111
106 if (rq_data_dir(vbr->req) == WRITE) { 112 /*
107 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; 113 * If this is a packet command we need a couple of additional headers.
108 out = 1 + num; 114 * Behind the normal outhdr we put a segment with the scsi command
109 in = 1; 115 * block, and in front of the normal inhdr we put the sense data
110 } else { 116 * followed by an inhdr with additional status information.
111 vbr->out_hdr.type |= VIRTIO_BLK_T_IN; 117 */
112 out = 1; 118 if (blk_pc_request(vbr->req))
113 in = 1 + num; 119 sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
120
121 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
122
123 if (blk_pc_request(vbr->req)) {
124 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
125 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
126 sizeof(vbr->in_hdr));
127 }
128
129 sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
130 sizeof(vbr->status));
131
132 if (num) {
133 if (rq_data_dir(vbr->req) == WRITE) {
134 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
135 out += num;
136 } else {
137 vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
138 in += num;
139 }
114 } 140 }
115 141
116 if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) { 142 if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
@@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
124 150
125static void do_virtblk_request(struct request_queue *q) 151static void do_virtblk_request(struct request_queue *q)
126{ 152{
127 struct virtio_blk *vblk = NULL; 153 struct virtio_blk *vblk = q->queuedata;
128 struct request *req; 154 struct request *req;
129 unsigned int issued = 0; 155 unsigned int issued = 0;
130 156
131 while ((req = elv_next_request(q)) != NULL) { 157 while ((req = blk_peek_request(q)) != NULL) {
132 vblk = req->rq_disk->private_data;
133 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); 158 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
134 159
135 /* If this request fails, stop queue and wait for something to 160 /* If this request fails, stop queue and wait for something to
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
138 blk_stop_queue(q); 163 blk_stop_queue(q);
139 break; 164 break;
140 } 165 }
141 blkdev_dequeue_request(req); 166 blk_start_request(req);
142 issued++; 167 issued++;
143 } 168 }
144 169
@@ -149,8 +174,16 @@ static void do_virtblk_request(struct request_queue *q)
149static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, 174static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
150 unsigned cmd, unsigned long data) 175 unsigned cmd, unsigned long data)
151{ 176{
152 return scsi_cmd_ioctl(bdev->bd_disk->queue, 177 struct gendisk *disk = bdev->bd_disk;
153 bdev->bd_disk, mode, cmd, 178 struct virtio_blk *vblk = disk->private_data;
179
180 /*
181 * Only allow the generic SCSI ioctls if the host can support it.
182 */
183 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
184 return -ENOIOCTLCMD;
185
186 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
154 (void __user *)data); 187 (void __user *)data);
155} 188}
156 189
@@ -249,6 +282,7 @@ static int virtblk_probe(struct virtio_device *vdev)
249 goto out_put_disk; 282 goto out_put_disk;
250 } 283 }
251 284
285 vblk->disk->queue->queuedata = vblk;
252 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue); 286 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
253 287
254 if (index < 26) { 288 if (index < 26) {
@@ -356,6 +390,7 @@ static struct virtio_device_id id_table[] = {
356static unsigned int features[] = { 390static unsigned int features[] = {
357 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, 391 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
358 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 392 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
393 VIRTIO_BLK_F_SCSI,
359}; 394};
360 395
361static struct virtio_driver virtio_blk = { 396static struct virtio_driver virtio_blk = {
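For orientation, this is the buffer chain do_req() now hands to add_buf() for a SCSI passthrough (blk_pc) request, as assembled by the sg_set_buf() calls above (editor's sketch of the layout):

/*
 * out (driver -> device):
 *   [ virtio_blk_outhdr ][ cdb: rq->cmd, rq->cmd_len ][ write data ... ]
 * in  (device -> driver):
 *   [ ... read data ][ sense, 96 bytes ][ virtio_scsi_inhdr ][ status byte ]
 *
 * Plain fs requests keep the old three-part shape: outhdr, data, status.
 */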
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 64b496fce98b..ce2429219925 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
305 if (xdc_busy) 305 if (xdc_busy)
306 return; 306 return;
307 307
308 while ((req = elv_next_request(q)) != NULL) { 308 req = blk_fetch_request(q);
309 unsigned block = req->sector; 309 while (req) {
310 unsigned count = req->nr_sectors; 310 unsigned block = blk_rq_pos(req);
311 int rw = rq_data_dir(req); 311 unsigned count = blk_rq_cur_sectors(req);
312 XD_INFO *disk = req->rq_disk->private_data; 312 XD_INFO *disk = req->rq_disk->private_data;
313 int res = 0; 313 int res = -EIO;
314 int retry; 314 int retry;
315 315
316 if (!blk_fs_request(req)) { 316 if (!blk_fs_request(req))
317 end_request(req, 0); 317 goto done;
318 continue; 318 if (block + count > get_capacity(req->rq_disk))
319 } 319 goto done;
320 if (block + count > get_capacity(req->rq_disk)) {
321 end_request(req, 0);
322 continue;
323 }
324 if (rw != READ && rw != WRITE) {
325 printk("do_xd_request: unknown request\n");
326 end_request(req, 0);
327 continue;
328 }
329 for (retry = 0; (retry < XD_RETRIES) && !res; retry++) 320 for (retry = 0; (retry < XD_RETRIES) && res; retry++)
330 res = xd_readwrite(rw, disk, req->buffer, block, count); 321 res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
331 end_request(req, res); /* wrap up, 0 = fail, 1 = success */ 322 block, count);
323 done:
324 /* wrap up, 0 = success, -errno = fail */
325 if (!__blk_end_request_cur(req, res))
326 req = blk_fetch_request(q);
332 } 327 }
333} 328}
334 329
@@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
418 printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); 413 printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
419 xd_recalibrate(drive); 414 xd_recalibrate(drive);
420 spin_lock_irq(&xd_lock); 415 spin_lock_irq(&xd_lock);
421 return (0); 416 return -EIO;
422 case 2: 417 case 2:
423 if (sense[0] & 0x30) { 418 if (sense[0] & 0x30) {
424 printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing")); 419 printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
439 else 434 else
440 printk(" - no valid disk address\n"); 435 printk(" - no valid disk address\n");
441 spin_lock_irq(&xd_lock); 436 spin_lock_irq(&xd_lock);
442 return (0); 437 return -EIO;
443 } 438 }
444 if (xd_dma_buffer) 439 if (xd_dma_buffer)
445 for (i=0; i < (temp * 0x200); i++) 440 for (i=0; i < (temp * 0x200); i++)
@@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
448 count -= temp, buffer += temp * 0x200, block += temp; 443 count -= temp, buffer += temp * 0x200, block += temp;
449 } 444 }
450 spin_lock_irq(&xd_lock); 445 spin_lock_irq(&xd_lock);
451 return (1); 446 return 0;
452} 447}
453 448
454/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ 449/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
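xd now drives its whole loop off __blk_end_request_cur(), and z2ram below gets the same shape. A minimal sketch of the idiom, with process_chunk() standing in (hypothetically) for the driver's per-chunk transfer:

static void sketch_do_request(struct request_queue *q)
{
        struct request *req = blk_fetch_request(q);

        while (req) {
                int err = process_chunk(req);   /* hypothetical helper */

                /*
                 * Complete only the current chunk; the helper returns
                 * false once the request is fully ended, and only then
                 * is the next request fetched.
                 */
                if (!__blk_end_request_cur(req, err))
                        req = blk_fetch_request(q);
        }
}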
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a6cbf7b808e6..132120ae4bde 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
122static int get_id_from_freelist(struct blkfront_info *info) 122static int get_id_from_freelist(struct blkfront_info *info)
123{ 123{
124 unsigned long free = info->shadow_free; 124 unsigned long free = info->shadow_free;
125 BUG_ON(free > BLK_RING_SIZE); 125 BUG_ON(free >= BLK_RING_SIZE);
126 info->shadow_free = info->shadow[free].req.id; 126 info->shadow_free = info->shadow[free].req.id;
127 info->shadow[free].req.id = 0x0fffffee; /* debug */ 127 info->shadow[free].req.id = 0x0fffffee; /* debug */
128 return free; 128 return free;
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
231 info->shadow[id].request = (unsigned long)req; 231 info->shadow[id].request = (unsigned long)req;
232 232
233 ring_req->id = id; 233 ring_req->id = id;
234 ring_req->sector_number = (blkif_sector_t)req->sector; 234 ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
235 ring_req->handle = info->handle; 235 ring_req->handle = info->handle;
236 236
237 ring_req->operation = rq_data_dir(req) ? 237 ring_req->operation = rq_data_dir(req) ?
@@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
299 299
300 queued = 0; 300 queued = 0;
301 301
302 while ((req = elv_next_request(rq)) != NULL) { 302 while ((req = blk_peek_request(rq)) != NULL) {
303 info = req->rq_disk->private_data; 303 info = req->rq_disk->private_data;
304 if (!blk_fs_request(req)) {
305 end_request(req, 0);
306 continue;
307 }
308 304
309 if (RING_FULL(&info->ring)) 305 if (RING_FULL(&info->ring))
310 goto wait; 306 goto wait;
311 307
312 pr_debug("do_blk_req %p: cmd %p, sec %lx, " 308 blk_start_request(req);
313 "(%u/%li) buffer:%p [%s]\n",
314 req, req->cmd, (unsigned long)req->sector,
315 req->current_nr_sectors,
316 req->nr_sectors, req->buffer,
317 rq_data_dir(req) ? "write" : "read");
318 309
310 if (!blk_fs_request(req)) {
311 __blk_end_request_all(req, -EIO);
312 continue;
313 }
314
315 pr_debug("do_blk_req %p: cmd %p, sec %lx, "
316 "(%u/%u) buffer:%p [%s]\n",
317 req, req->cmd, (unsigned long)blk_rq_pos(req),
318 blk_rq_cur_sectors(req), blk_rq_sectors(req),
319 req->buffer, rq_data_dir(req) ? "write" : "read");
319 320
320 blkdev_dequeue_request(req);
321 if (blkif_queue_request(req)) { 321 if (blkif_queue_request(req)) {
322 blk_requeue_request(rq, req); 322 blk_requeue_request(rq, req);
323wait: 323wait:
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
551 551
552 for (i = info->ring.rsp_cons; i != rp; i++) { 552 for (i = info->ring.rsp_cons; i != rp; i++) {
553 unsigned long id; 553 unsigned long id;
554 int ret;
555 554
556 bret = RING_GET_RESPONSE(&info->ring, i); 555 bret = RING_GET_RESPONSE(&info->ring, i);
557 id = bret->id; 556 id = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
578 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " 577 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
579 "request: %x\n", bret->status); 578 "request: %x\n", bret->status);
580 579
581 ret = __blk_end_request(req, error, blk_rq_bytes(req)); 580 __blk_end_request_all(req, error);
582 BUG_ON(ret);
583 break; 581 break;
584 default: 582 default:
585 BUG(); 583 BUG();
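xen-blkfront shows the undo operation for blk_start_request(): a started request that turns out to be unissuable goes back on the queue. A hedged sketch (issue_to_ring() is hypothetical; the real code jumps to its wait label, which stops the queue):

        blk_start_request(req);                 /* req is now off the queue */
        if (issue_to_ring(req)) {               /* hypothetical, e.g. ring full */
                blk_requeue_request(q, req);    /* hand it back at the head */
                blk_stop_queue(q);              /* resume when completions arrive */
        }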
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4aecf5dc6a93..3a4397edab71 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
463{ 463{
464 struct request *req; 464 struct request *req;
465 465
466 while ((req = elv_next_request(q)) != NULL) { 466 while ((req = blk_peek_request(q)) != NULL) {
467 if (blk_fs_request(req)) 467 if (blk_fs_request(req))
468 break; 468 break;
469 end_request(req, 0); 469 blk_start_request(req);
470 __blk_end_request_all(req, -EIO);
470 } 471 }
471 return req; 472 return req;
472} 473}
@@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
492 set_capacity(ace->gd, 0); 493 set_capacity(ace->gd, 0);
493 dev_info(ace->dev, "No CF in slot\n"); 494 dev_info(ace->dev, "No CF in slot\n");
494 495
495 /* Drop all pending requests */ 496 /* Drop all in-flight and pending requests */
496 while ((req = elv_next_request(ace->queue)) != NULL) 497 if (ace->req) {
497 end_request(req, 0); 498 __blk_end_request_all(ace->req, -EIO);
499 ace->req = NULL;
500 }
501 while ((req = blk_fetch_request(ace->queue)) != NULL)
502 __blk_end_request_all(req, -EIO);
498 503
499 /* Drop back to IDLE state and notify waiters */ 504 /* Drop back to IDLE state and notify waiters */
500 ace->fsm_state = ACE_FSM_STATE_IDLE; 505 ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
642 ace->fsm_state = ACE_FSM_STATE_IDLE; 647 ace->fsm_state = ACE_FSM_STATE_IDLE;
643 break; 648 break;
644 } 649 }
650 blk_start_request(req);
645 651
646 /* Okay, it's a data request, set it up for transfer */ 652 /* Okay, it's a data request, set it up for transfer */
647 dev_dbg(ace->dev, 653 dev_dbg(ace->dev,
648 "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n", 654 "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
649 (unsigned long long) req->sector, req->hard_nr_sectors, 655 (unsigned long long)blk_rq_pos(req),
650 req->current_nr_sectors, rq_data_dir(req)); 656 blk_rq_sectors(req), blk_rq_cur_sectors(req),
657 rq_data_dir(req));
651 658
652 ace->req = req; 659 ace->req = req;
653 ace->data_ptr = req->buffer; 660 ace->data_ptr = req->buffer;
654 ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR; 661 ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
655 ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF); 662 ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
656 663
657 count = req->hard_nr_sectors; 664 count = blk_rq_sectors(req);
658 if (rq_data_dir(req)) { 665 if (rq_data_dir(req)) {
659 /* Kick off write request */ 666 /* Kick off write request */
660 dev_dbg(ace->dev, "write data\n"); 667 dev_dbg(ace->dev, "write data\n");
@@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
688 dev_dbg(ace->dev, 695 dev_dbg(ace->dev,
689 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", 696 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
690 ace->fsm_task, ace->fsm_iter_num, 697 ace->fsm_task, ace->fsm_iter_num,
691 ace->req->current_nr_sectors * 16, 698 blk_rq_cur_sectors(ace->req) * 16,
692 ace->data_count, ace->in_irq); 699 ace->data_count, ace->in_irq);
693 ace_fsm_yield(ace); /* need to poll CFBSY bit */ 700 ace_fsm_yield(ace); /* need to poll CFBSY bit */
694 break; 701 break;
@@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
697 dev_dbg(ace->dev, 704 dev_dbg(ace->dev,
698 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", 705 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
699 ace->fsm_task, ace->fsm_iter_num, 706 ace->fsm_task, ace->fsm_iter_num,
700 ace->req->current_nr_sectors * 16, 707 blk_rq_cur_sectors(ace->req) * 16,
701 ace->data_count, ace->in_irq); 708 ace->data_count, ace->in_irq);
702 ace_fsm_yieldirq(ace); 709 ace_fsm_yieldirq(ace);
703 break; 710 break;
@@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
717 } 724 }
718 725
719 /* bio finished; is there another one? */ 726 /* bio finished; is there another one? */
720 if (__blk_end_request(ace->req, 0, 727 if (__blk_end_request_cur(ace->req, 0)) {
721 blk_rq_cur_bytes(ace->req))) { 728 /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
722 /* dev_dbg(ace->dev, "next block; h=%li c=%i\n", 729 * blk_rq_sectors(ace->req),
723 * ace->req->hard_nr_sectors, 730 * blk_rq_cur_sectors(ace->req));
724 * ace->req->current_nr_sectors);
725 */ 731 */
726 ace->data_ptr = ace->req->buffer; 732 ace->data_ptr = ace->req->buffer;
727 ace->data_count = ace->req->current_nr_sectors * 16; 733 ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
728 ace_fsm_yieldirq(ace); 734 ace_fsm_yieldirq(ace);
729 break; 735 break;
730 } 736 }
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 80754cdd3119..4575171e5beb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
70static void do_z2_request(struct request_queue *q) 70static void do_z2_request(struct request_queue *q)
71{ 71{
72 struct request *req; 72 struct request *req;
73 while ((req = elv_next_request(q)) != NULL) { 73
74 unsigned long start = req->sector << 9; 74 req = blk_fetch_request(q);
75 unsigned long len = req->current_nr_sectors << 9; 75 while (req) {
76 unsigned long start = blk_rq_pos(req) << 9;
77 unsigned long len = blk_rq_cur_bytes(req);
78 int err = 0;
76 79
77 if (start + len > z2ram_size) { 80 if (start + len > z2ram_size) {
78 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", 81 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
79 req->sector, req->current_nr_sectors); 82 blk_rq_pos(req), blk_rq_cur_sectors(req));
80 end_request(req, 0); 83 err = -EIO;
81 continue; 84 goto done;
82 } 85 }
83 while (len) { 86 while (len) {
84 unsigned long addr = start & Z2RAM_CHUNKMASK; 87 unsigned long addr = start & Z2RAM_CHUNKMASK;
@@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
93 start += size; 96 start += size;
94 len -= size; 97 len -= size;
95 } 98 }
96 end_request(req, 1); 99 done:
100 if (!__blk_end_request_cur(req, err))
101 req = blk_fetch_request(q);
97 } 102 }
98} 103}
99 104
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 2eecb779437b..1e366ad8f680 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
584 list_for_each_safe(elem, next, &gdrom_deferred) { 584 list_for_each_safe(elem, next, &gdrom_deferred) {
585 req = list_entry(elem, struct request, queuelist); 585 req = list_entry(elem, struct request, queuelist);
586 spin_unlock(&gdrom_lock); 586 spin_unlock(&gdrom_lock);
587 block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET; 587 block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
588 block_cnt = req->nr_sectors/GD_TO_BLK; 588 block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG); 589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); 590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG); 591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
632 * before handling ending the request */ 632 * before handling ending the request */
633 spin_lock(&gdrom_lock); 633 spin_lock(&gdrom_lock);
634 list_del_init(&req->queuelist); 634 list_del_init(&req->queuelist);
635 __blk_end_request(req, err, blk_rq_bytes(req)); 635 __blk_end_request_all(req, err);
636 } 636 }
637 spin_unlock(&gdrom_lock); 637 spin_unlock(&gdrom_lock);
638 kfree(read_command); 638 kfree(read_command);
639} 639}
640 640
641static void gdrom_request_handler_dma(struct request *req)
642{
643 /* dequeue, add to list of deferred work
644 * and then schedule workqueue */
645 blkdev_dequeue_request(req);
646 list_add_tail(&req->queuelist, &gdrom_deferred);
647 schedule_work(&work);
648}
649
650static void gdrom_request(struct request_queue *rq) 641static void gdrom_request(struct request_queue *rq)
651{ 642{
652 struct request *req; 643 struct request *req;
653 644
654 while ((req = elv_next_request(rq)) != NULL) { 645 while ((req = blk_fetch_request(rq)) != NULL) {
655 if (!blk_fs_request(req)) { 646 if (!blk_fs_request(req)) {
656 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); 647 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
657 end_request(req, 0); 648 __blk_end_request_all(req, -EIO);
649 continue;
658 } 650 }
659 if (rq_data_dir(req) != READ) { 651 if (rq_data_dir(req) != READ) {
660 printk(KERN_NOTICE "GDROM: Read only device -"); 652 printk(KERN_NOTICE "GDROM: Read only device -");
661 printk(" write request ignored\n"); 653 printk(" write request ignored\n");
662 end_request(req, 0); 654 __blk_end_request_all(req, -EIO);
655 continue;
663 } 656 }
664 if (req->nr_sectors) 657
665 gdrom_request_handler_dma(req); 658 /*
666 else 659 * Add to list of deferred work and then schedule
667 end_request(req, 0); 660 * workqueue.
661 */
662 list_add_tail(&req->queuelist, &gdrom_deferred);
663 schedule_work(&work);
668 } 664 }
669} 665}
670 666
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 9b1624e0ddeb..f177c2d4017f 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
282 viopath_targetinst(viopath_hostLp), 282 viopath_targetinst(viopath_hostLp),
283 (u64)req, VIOVERSION << 16, 283 (u64)req, VIOVERSION << 16,
284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr, 284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
285 (u64)req->sector * 512, len, 0); 285 (u64)blk_rq_pos(req) * 512, len, 0);
286 if (hvrc != HvLpEvent_Rc_Good) { 286 if (hvrc != HvLpEvent_Rc_Good) {
287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc); 287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
288 return -1; 288 return -1;
@@ -291,36 +291,19 @@ static int send_request(struct request *req)
291 return 0; 291 return 0;
292} 292}
293 293
294static void viocd_end_request(struct request *req, int error)
295{
296 int nsectors = req->hard_nr_sectors;
297
298 /*
299 * Make sure it's fully ended, and ensure that we process
300 * at least one sector.
301 */
302 if (blk_pc_request(req))
303 nsectors = (req->data_len + 511) >> 9;
304 if (!nsectors)
305 nsectors = 1;
306
307 if (__blk_end_request(req, error, nsectors << 9))
308 BUG();
309}
310
311static int rwreq; 294static int rwreq;
312 295
313static void do_viocd_request(struct request_queue *q) 296static void do_viocd_request(struct request_queue *q)
314{ 297{
315 struct request *req; 298 struct request *req;
316 299
317 while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { 300 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
318 if (!blk_fs_request(req)) 301 if (!blk_fs_request(req))
319 viocd_end_request(req, -EIO); 302 __blk_end_request_all(req, -EIO);
320 else if (send_request(req) < 0) { 303 else if (send_request(req) < 0) {
321 printk(VIOCD_KERN_WARNING 304 printk(VIOCD_KERN_WARNING
322 "unable to send message to OS/400!"); 305 "unable to send message to OS/400!");
323 viocd_end_request(req, -EIO); 306 __blk_end_request_all(req, -EIO);
324 } else 307 } else
325 rwreq++; 308 rwreq++;
326 } 309 }
@@ -531,9 +514,9 @@ return_complete:
531 "with rc %d:0x%04X: %s\n", 514 "with rc %d:0x%04X: %s\n",
532 req, event->xRc, 515 req, event->xRc,
533 bevent->sub_result, err->msg); 516 bevent->sub_result, err->msg);
534 viocd_end_request(req, -EIO); 517 __blk_end_request_all(req, -EIO);
535 } else 518 } else
536 viocd_end_request(req, 0); 519 __blk_end_request_all(req, 0);
537 520
538 /* restart handling of incoming requests */ 521 /* restart handling of incoming requests */
539 spin_unlock_irqrestore(&viocd_reqlock, flags); 522 spin_unlock_irqrestore(&viocd_reqlock, flags);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 7201b176d75b..8a894fa37b53 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -80,34 +80,6 @@ void ide_init_pc(struct ide_atapi_pc *pc)
80EXPORT_SYMBOL_GPL(ide_init_pc); 80EXPORT_SYMBOL_GPL(ide_init_pc);
81 81
82/* 82/*
83 * Generate a new packet command request in front of the request queue, before
84 * the current request, so that it will be processed immediately, on the next
85 * pass through the driver.
86 */
87static void ide_queue_pc_head(ide_drive_t *drive, struct gendisk *disk,
88 struct ide_atapi_pc *pc, struct request *rq)
89{
90 blk_rq_init(NULL, rq);
91 rq->cmd_type = REQ_TYPE_SPECIAL;
92 rq->cmd_flags |= REQ_PREEMPT;
93 rq->buffer = (char *)pc;
94 rq->rq_disk = disk;
95
96 if (pc->req_xfer) {
97 rq->data = pc->buf;
98 rq->data_len = pc->req_xfer;
99 }
100
101 memcpy(rq->cmd, pc->c, 12);
102 if (drive->media == ide_tape)
103 rq->cmd[13] = REQ_IDETAPE_PC1;
104
105 drive->hwif->rq = NULL;
106
107 elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
108}
109
110/*
111 * Add a special packet command request to the tail of the request queue, 83 * Add a special packet command request to the tail of the request queue,
112 * and wait for it to be serviced. 84 * and wait for it to be serviced.
113 */ 85 */
@@ -119,19 +91,21 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
119 91
120 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 92 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
121 rq->cmd_type = REQ_TYPE_SPECIAL; 93 rq->cmd_type = REQ_TYPE_SPECIAL;
122 rq->buffer = (char *)pc; 94 rq->special = (char *)pc;
123 95
124 if (pc->req_xfer) { 96 if (pc->req_xfer) {
125 rq->data = pc->buf; 97 error = blk_rq_map_kern(drive->queue, rq, pc->buf, pc->req_xfer,
126 rq->data_len = pc->req_xfer; 98 GFP_NOIO);
99 if (error)
100 goto put_req;
127 } 101 }
128 102
129 memcpy(rq->cmd, pc->c, 12); 103 memcpy(rq->cmd, pc->c, 12);
130 if (drive->media == ide_tape) 104 if (drive->media == ide_tape)
131 rq->cmd[13] = REQ_IDETAPE_PC1; 105 rq->cmd[13] = REQ_IDETAPE_PC1;
132 error = blk_execute_rq(drive->queue, disk, rq, 0); 106 error = blk_execute_rq(drive->queue, disk, rq, 0);
107put_req:
133 blk_put_request(rq); 108 blk_put_request(rq);
134
135 return error; 109 return error;
136} 110}
137EXPORT_SYMBOL_GPL(ide_queue_pc_tail); 111EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
@@ -191,20 +165,113 @@ void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
191} 165}
192EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd); 166EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
193 167
168void ide_prep_sense(ide_drive_t *drive, struct request *rq)
169{
170 struct request_sense *sense = &drive->sense_data;
171 struct request *sense_rq = &drive->sense_rq;
172 unsigned int cmd_len, sense_len;
173 int err;
174
175 debug_log("%s: enter\n", __func__);
176
177 switch (drive->media) {
178 case ide_floppy:
179 cmd_len = 255;
180 sense_len = 18;
181 break;
182 case ide_tape:
183 cmd_len = 20;
184 sense_len = 20;
185 break;
186 default:
187 cmd_len = 18;
188 sense_len = 18;
189 }
190
191 BUG_ON(sense_len > sizeof(*sense));
192
193 if (blk_sense_request(rq) || drive->sense_rq_armed)
194 return;
195
196 memset(sense, 0, sizeof(*sense));
197
198 blk_rq_init(rq->q, sense_rq);
199
200 err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
201 GFP_NOIO);
202 if (unlikely(err)) {
203 if (printk_ratelimit())
204 printk(KERN_WARNING "%s: failed to map sense buffer\n",
205 drive->name);
206 return;
207 }
208
209 sense_rq->rq_disk = rq->rq_disk;
210 sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
211 sense_rq->cmd[4] = cmd_len;
212 sense_rq->cmd_type = REQ_TYPE_SENSE;
213 sense_rq->cmd_flags |= REQ_PREEMPT;
214
215 if (drive->media == ide_tape)
216 sense_rq->cmd[13] = REQ_IDETAPE_PC1;
217
218 drive->sense_rq_armed = true;
219}
220EXPORT_SYMBOL_GPL(ide_prep_sense);
221
222int ide_queue_sense_rq(ide_drive_t *drive, void *special)
223{
224 /* deferred failure from ide_prep_sense() */
225 if (!drive->sense_rq_armed) {
226 printk(KERN_WARNING "%s: failed queue sense request\n",
227 drive->name);
228 return -ENOMEM;
229 }
230
231 drive->sense_rq.special = special;
232 drive->sense_rq_armed = false;
233
234 drive->hwif->rq = NULL;
235
236 elv_add_request(drive->queue, &drive->sense_rq,
237 ELEVATOR_INSERT_FRONT, 0);
238 return 0;
239}
240EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
241
194/* 242/*
195 * Called when an error was detected during the last packet command. 243 * Called when an error was detected during the last packet command.
196 * We queue a request sense packet command in the head of the request list. 244 * We queue a request sense packet command at the head of the request
245 * queue.
197 */ 246 */
198void ide_retry_pc(ide_drive_t *drive, struct gendisk *disk) 247void ide_retry_pc(ide_drive_t *drive)
199{ 248{
200 struct request *rq = &drive->request_sense_rq; 249 struct request *failed_rq = drive->hwif->rq;
250 struct request *sense_rq = &drive->sense_rq;
201 struct ide_atapi_pc *pc = &drive->request_sense_pc; 251 struct ide_atapi_pc *pc = &drive->request_sense_pc;
202 252
203 (void)ide_read_error(drive); 253 (void)ide_read_error(drive);
204 ide_create_request_sense_cmd(drive, pc); 254
255 /* init pc from sense_rq */
256 ide_init_pc(pc);
257 memcpy(pc->c, sense_rq->cmd, 12);
258 pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */
259 pc->req_xfer = blk_rq_bytes(sense_rq);
260
205 if (drive->media == ide_tape) 261 if (drive->media == ide_tape)
206 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 262 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
207 ide_queue_pc_head(drive, disk, pc, rq); 263
264 /*
265 * Push back the failed request and put request sense on top
266 * of it. The failed command will be retried after sense data
267 * is acquired.
268 */
269 blk_requeue_request(failed_rq->q, failed_rq);
270 drive->hwif->rq = NULL;
271 if (ide_queue_sense_rq(drive, pc)) {
272 blk_start_request(failed_rq);
273 ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
274 }
208} 275}
209EXPORT_SYMBOL_GPL(ide_retry_pc); 276EXPORT_SYMBOL_GPL(ide_retry_pc);
210 277
@@ -246,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
246 return 32768; 313 return 32768;
247 else if (blk_sense_request(rq) || blk_pc_request(rq) || 314 else if (blk_sense_request(rq) || blk_pc_request(rq) ||
248 rq->cmd_type == REQ_TYPE_ATA_PC) 315 rq->cmd_type == REQ_TYPE_ATA_PC)
249 return rq->data_len; 316 return blk_rq_bytes(rq);
250 else 317 else
251 return 0; 318 return 0;
252} 319}
@@ -276,7 +343,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
276 struct ide_cmd *cmd = &hwif->cmd; 343 struct ide_cmd *cmd = &hwif->cmd;
277 struct request *rq = hwif->rq; 344 struct request *rq = hwif->rq;
278 const struct ide_tp_ops *tp_ops = hwif->tp_ops; 345 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
279 xfer_func_t *xferfunc;
280 unsigned int timeout, done; 346 unsigned int timeout, done;
281 u16 bcount; 347 u16 bcount;
282 u8 stat, ireason, dsc = 0; 348 u8 stat, ireason, dsc = 0;
@@ -303,18 +369,14 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
303 drive->name, rq_data_dir(pc->rq) 369 drive->name, rq_data_dir(pc->rq)
304 ? "write" : "read"); 370 ? "write" : "read");
305 pc->flags |= PC_FLAG_DMA_ERROR; 371 pc->flags |= PC_FLAG_DMA_ERROR;
306 } else { 372 } else
307 pc->xferred = pc->req_xfer; 373 pc->xferred = pc->req_xfer;
308 if (drive->pc_update_buffers)
309 drive->pc_update_buffers(drive, pc);
310 }
311 debug_log("%s: DMA finished\n", drive->name); 374 debug_log("%s: DMA finished\n", drive->name);
312 } 375 }
313 376
314 /* No more interrupts */ 377 /* No more interrupts */
315 if ((stat & ATA_DRQ) == 0) { 378 if ((stat & ATA_DRQ) == 0) {
316 int uptodate, error; 379 int uptodate, error;
317 unsigned int done;
318 380
319 debug_log("Packet command completed, %d bytes transferred\n", 381 debug_log("Packet command completed, %d bytes transferred\n",
320 pc->xferred); 382 pc->xferred);
@@ -343,7 +405,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
343 debug_log("[cmd %x]: check condition\n", rq->cmd[0]); 405 debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
344 406
345 /* Retry operation */ 407 /* Retry operation */
346 ide_retry_pc(drive, rq->rq_disk); 408 ide_retry_pc(drive);
347 409
348 /* queued, but not started */ 410 /* queued, but not started */
349 return ide_stopped; 411 return ide_stopped;
@@ -361,7 +423,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
361 423
362 if (blk_special_request(rq)) { 424 if (blk_special_request(rq)) {
363 rq->errors = 0; 425 rq->errors = 0;
364 done = blk_rq_bytes(rq);
365 error = 0; 426 error = 0;
366 } else { 427 } else {
367 428
@@ -370,15 +431,10 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
370 rq->errors = -EIO; 431 rq->errors = -EIO;
371 } 432 }
372 433
373 if (drive->media == ide_tape)
374 done = ide_rq_bytes(rq); /* FIXME */
375 else
376 done = blk_rq_bytes(rq);
377
378 error = uptodate ? 0 : -EIO; 434 error = uptodate ? 0 : -EIO;
379 } 435 }
380 436
381 ide_complete_rq(drive, error, done); 437 ide_complete_rq(drive, error, blk_rq_bytes(rq));
382 return ide_stopped; 438 return ide_stopped;
383 } 439 }
384 440
@@ -407,21 +463,11 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
407 return ide_do_reset(drive); 463 return ide_do_reset(drive);
408 } 464 }
409 465
410 xferfunc = write ? tp_ops->output_data : tp_ops->input_data; 466 done = min_t(unsigned int, bcount, cmd->nleft);
411 467 ide_pio_bytes(drive, cmd, write, done);
412 if (drive->media == ide_floppy && pc->buf == NULL) {
413 done = min_t(unsigned int, bcount, cmd->nleft);
414 ide_pio_bytes(drive, cmd, write, done);
415 } else if (drive->media == ide_tape && pc->bh) {
416 done = drive->pc_io_buffers(drive, pc, bcount, write);
417 } else {
418 done = min_t(unsigned int, bcount, pc->req_xfer - pc->xferred);
419 xferfunc(drive, NULL, pc->cur_pos, done);
420 }
421 468
422 /* Update the current position */ 469 /* Update transferred byte count */
423 pc->xferred += done; 470 pc->xferred += done;
424 pc->cur_pos += done;
425 471
426 bcount -= done; 472 bcount -= done;
427 473
@@ -599,7 +645,6 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
599 645
600 /* We haven't transferred any data yet */ 646 /* We haven't transferred any data yet */
601 pc->xferred = 0; 647 pc->xferred = 0;
602 pc->cur_pos = pc->buf;
603 648
604 valid_tf = IDE_VALID_DEVICE; 649 valid_tf = IDE_VALID_DEVICE;
605 bcount = ((drive->media == ide_tape) ? 650 bcount = ((drive->media == ide_tape) ?
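The new ide-atapi sense machinery deliberately splits buffer setup from queueing so the error path allocates nothing. An editor's sketch of the call order, using only the functions added in the hunks above:

/*
 * issue time (each packet command):
 *      ide_prep_sense(drive, rq);      // map drive->sense_data into
 *                                      // drive->sense_rq and arm it
 *      ide_issue_pc(drive, &cmd);      // start the real command
 *
 * error time (CHECK CONDITION seen):
 *      ide_retry_pc(drive);            // requeue the failed request, then
 *                                      // ide_queue_sense_rq() inserts the
 *                                      // armed sense_rq at the queue head
 *                                      // (ELEVATOR_INSERT_FRONT)
 */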
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 925eb9e245d1..1799328decfb 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -206,54 +206,25 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
206 ide_cd_log_error(drive->name, failed_command, sense); 206 ide_cd_log_error(drive->name, failed_command, sense);
207} 207}
208 208
209static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
210 struct request *failed_command)
211{
212 struct cdrom_info *info = drive->driver_data;
213 struct request *rq = &drive->request_sense_rq;
214
215 ide_debug_log(IDE_DBG_SENSE, "enter");
216
217 if (sense == NULL)
218 sense = &info->sense_data;
219
220 /* stuff the sense request in front of our current request */
221 blk_rq_init(NULL, rq);
222 rq->cmd_type = REQ_TYPE_ATA_PC;
223 rq->rq_disk = info->disk;
224
225 rq->data = sense;
226 rq->cmd[0] = GPCMD_REQUEST_SENSE;
227 rq->cmd[4] = 18;
228 rq->data_len = 18;
229
230 rq->cmd_type = REQ_TYPE_SENSE;
231 rq->cmd_flags |= REQ_PREEMPT;
232
233 /* NOTE! Save the failed command in "rq->buffer" */
234 rq->buffer = (void *) failed_command;
235
236 if (failed_command)
237 ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
238 failed_command->cmd[0]);
239
240 drive->hwif->rq = NULL;
241
242 elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
243}
244
245static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) 209static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
246{ 210{
247 /* 211 /*
248 * For REQ_TYPE_SENSE, "rq->buffer" points to the original 212 * For REQ_TYPE_SENSE, "rq->special" points to the original
249 * failed request 213 * failed request. Also, the sense data should be read
214 * directly from rq, which might be different from the original
215 * sense buffer if it got copied during mapping.
250 */ 216 */
251 struct request *failed = (struct request *)rq->buffer; 217 struct request *failed = (struct request *)rq->special;
252 struct cdrom_info *info = drive->driver_data; 218 void *sense = bio_data(rq->bio);
253 void *sense = &info->sense_data;
254 219
255 if (failed) { 220 if (failed) {
256 if (failed->sense) { 221 if (failed->sense) {
222 /*
223 * Sense is always read into drive->sense_data.
224 * Copy back if the failed request has its
225 * sense pointer set.
226 */
227 memcpy(failed->sense, sense, 18);
257 sense = failed->sense; 228 sense = failed->sense;
258 failed->sense_len = rq->sense_len; 229 failed->sense_len = rq->sense_len;
259 } 230 }
@@ -428,22 +399,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
428 399
429 /* if we got a CHECK_CONDITION status, queue a request sense command */ 400 /* if we got a CHECK_CONDITION status, queue a request sense command */
430 if (stat & ATA_ERR) 401 if (stat & ATA_ERR)
431 cdrom_queue_request_sense(drive, NULL, NULL); 402 return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
432 return 1; 403 return 1;
433 404
434end_request: 405end_request:
435 if (stat & ATA_ERR) { 406 if (stat & ATA_ERR) {
436 struct request_queue *q = drive->queue;
437 unsigned long flags;
438
439 spin_lock_irqsave(q->queue_lock, flags);
440 blkdev_dequeue_request(rq);
441 spin_unlock_irqrestore(q->queue_lock, flags);
442
443 hwif->rq = NULL; 407 hwif->rq = NULL;
444 408 return ide_queue_sense_rq(drive, rq) ? 2 : 1;
445 cdrom_queue_request_sense(drive, rq->sense, rq);
446 return 1;
447 } else 409 } else
448 return 2; 410 return 2;
449} 411}
@@ -503,14 +465,8 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
503 * and some drives don't send them. Sigh. 465 * and some drives don't send them. Sigh.
504 */ 466 */
505 if (rq->cmd[0] == GPCMD_REQUEST_SENSE && 467 if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
506 cmd->nleft > 0 && cmd->nleft <= 5) { 468 cmd->nleft > 0 && cmd->nleft <= 5)
507 unsigned int ofs = cmd->nbytes - cmd->nleft; 469 cmd->nleft = 0;
508
509 while (cmd->nleft > 0) {
510 *((u8 *)rq->data + ofs++) = 0;
511 cmd->nleft--;
512 }
513 }
514} 470}
515 471
516int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, 472int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
@@ -543,14 +499,18 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
543 rq->cmd_flags |= cmd_flags; 499 rq->cmd_flags |= cmd_flags;
544 rq->timeout = timeout; 500 rq->timeout = timeout;
545 if (buffer) { 501 if (buffer) {
546 rq->data = buffer; 502 error = blk_rq_map_kern(drive->queue, rq, buffer,
547 rq->data_len = *bufflen; 503 *bufflen, GFP_NOIO);
504 if (error) {
505 blk_put_request(rq);
506 return error;
507 }
548 } 508 }
549 509
550 error = blk_execute_rq(drive->queue, info->disk, rq, 0); 510 error = blk_execute_rq(drive->queue, info->disk, rq, 0);
551 511
552 if (buffer) 512 if (buffer)
553 *bufflen = rq->data_len; 513 *bufflen = rq->resid_len;
554 514
555 flags = rq->cmd_flags; 515 flags = rq->cmd_flags;
556 blk_put_request(rq); 516 blk_put_request(rq);
@@ -608,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
608 struct request *rq = hwif->rq; 568 struct request *rq = hwif->rq;
609 ide_expiry_t *expiry = NULL; 569 ide_expiry_t *expiry = NULL;
610 int dma_error = 0, dma, thislen, uptodate = 0; 570 int dma_error = 0, dma, thislen, uptodate = 0;
611 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors; 571 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
612 int sense = blk_sense_request(rq); 572 int sense = blk_sense_request(rq);
613 unsigned int timeout; 573 unsigned int timeout;
614 u16 len; 574 u16 len;
@@ -738,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
738 698
739out_end: 699out_end:
740 if (blk_pc_request(rq) && rc == 0) { 700 if (blk_pc_request(rq) && rc == 0) {
741 unsigned int dlen = rq->data_len; 701 rq->resid_len = 0;
742 702 blk_end_request_all(rq, 0);
743 rq->data_len = 0;
744
745 if (blk_end_request(rq, 0, dlen))
746 BUG();
747
748 hwif->rq = NULL; 703 hwif->rq = NULL;
749 } else { 704 } else {
750 if (sense && uptodate) 705 if (sense && uptodate)
@@ -762,21 +717,13 @@ out_end:
762 ide_cd_error_cmd(drive, cmd); 717 ide_cd_error_cmd(drive, cmd);
763 718
764 /* make sure it's fully ended */ 719 /* make sure it's fully ended */
765 if (blk_pc_request(rq))
766 nsectors = (rq->data_len + 511) >> 9;
767 else
768 nsectors = rq->hard_nr_sectors;
769
770 if (nsectors == 0)
771 nsectors = 1;
772
773 if (blk_fs_request(rq) == 0) { 720 if (blk_fs_request(rq) == 0) {
774 rq->data_len -= (cmd->nbytes - cmd->nleft); 721 rq->resid_len -= cmd->nbytes - cmd->nleft;
775 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 722 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
776 rq->data_len += cmd->last_xfer_len; 723 rq->resid_len += cmd->last_xfer_len;
777 } 724 }
778 725
779 ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9); 726 ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
780 727
781 if (sense && rc == 2) 728 if (sense && rc == 2)
782 ide_error(drive, "request sense failure", stat); 729 ide_error(drive, "request sense failure", stat);
@@ -809,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
809 } 756 }
810 757
811 /* fs requests *must* be hardware frame aligned */ 758 /* fs requests *must* be hardware frame aligned */
812 if ((rq->nr_sectors & (sectors_per_frame - 1)) || 759 if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
813 (rq->sector & (sectors_per_frame - 1))) 760 (blk_rq_pos(rq) & (sectors_per_frame - 1)))
814 return ide_stopped; 761 return ide_stopped;
815 762
816 /* use DMA, if possible */ 763 /* use DMA, if possible */
@@ -838,15 +785,10 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
838 drive->dma = 0; 785 drive->dma = 0;
839 786
840 /* sg request */ 787 /* sg request */
841 if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) { 788 if (rq->bio) {
842 struct request_queue *q = drive->queue; 789 struct request_queue *q = drive->queue;
790 char *buf = bio_data(rq->bio);
843 unsigned int alignment; 791 unsigned int alignment;
844 char *buf;
845
846 if (rq->bio)
847 buf = bio_data(rq->bio);
848 else
849 buf = rq->data;
850 792
851 drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 793 drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
852 794
@@ -858,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
858 */ 800 */
859 alignment = queue_dma_alignment(q) | q->dma_pad_mask; 801 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
860 if ((unsigned long)buf & alignment 802 if ((unsigned long)buf & alignment
861 || rq->data_len & q->dma_pad_mask 803 || blk_rq_bytes(rq) & q->dma_pad_mask
862 || object_is_on_stack(buf)) 804 || object_is_on_stack(buf))
863 drive->dma = 0; 805 drive->dma = 0;
864 } 806 }
@@ -896,6 +838,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
896 goto out_end; 838 goto out_end;
897 } 839 }
898 840
841 /* prepare sense request for this command */
842 ide_prep_sense(drive, rq);
843
899 memset(&cmd, 0, sizeof(cmd)); 844 memset(&cmd, 0, sizeof(cmd));
900 845
901 if (rq_data_dir(rq)) 846 if (rq_data_dir(rq))
@@ -903,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
903 848
904 cmd.rq = rq; 849 cmd.rq = rq;
905 850
906 if (blk_fs_request(rq) || rq->data_len) { 851 if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
907 ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9) 852 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
908 : rq->data_len);
909 ide_map_sg(drive, &cmd); 853 ide_map_sg(drive, &cmd);
910 } 854 }
911 855
912 return ide_issue_pc(drive, &cmd); 856 return ide_issue_pc(drive, &cmd);
913out_end: 857out_end:
914 nsectors = rq->hard_nr_sectors; 858 nsectors = blk_rq_sectors(rq);
915 859
916 if (nsectors == 0) 860 if (nsectors == 0)
917 nsectors = 1; 861 nsectors = 1;
@@ -1395,8 +1339,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1395static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) 1339static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1396{ 1340{
1397 int hard_sect = queue_hardsect_size(q); 1341 int hard_sect = queue_hardsect_size(q);
1398 long block = (long)rq->hard_sector / (hard_sect >> 9); 1342 long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
1399 unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9); 1343 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
1400 1344
1401 memset(rq->cmd, 0, BLK_MAX_CDB); 1345 memset(rq->cmd, 0, BLK_MAX_CDB);
1402 1346
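
The ide-cd hunks above are mostly mechanical: direct reads of rq->sector, rq->nr_sectors and rq->data_len become blk_rq_pos(), blk_rq_sectors() and blk_rq_bytes(), so the block layer can keep the in-flight position and length private and update them consistently on partial completion. A minimal sketch of the accessor idea; the struct fields are modeled on the post-conversion request, not copied from any particular tree:

#include <stdint.h>

typedef uint64_t sector_t;

/* Toy request: position/length are private to the block layer and
 * drivers go through accessors instead of touching fields directly. */
struct request {
        sector_t     __sector;    /* start sector of the unfinished part */
        unsigned int __data_len;  /* total remaining bytes */
};

static inline sector_t blk_rq_pos(const struct request *rq)
{
        return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
        return rq->__data_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
        return blk_rq_bytes(rq) >> 9;   /* 512-byte sectors */
}
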
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1d97101099ce..93a3cf1b0f3f 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -87,10 +87,6 @@ struct cdrom_info {
87 87
88 struct atapi_toc *toc; 88 struct atapi_toc *toc;
89 89
90 /* The result of the last successful request sense command
91 on this device. */
92 struct request_sense sense_data;
93
94 u8 max_speed; /* Max speed of the drive. */ 90 u8 max_speed; /* Max speed of the drive. */
95 u8 current_speed; /* Current speed of the drive. */ 91 u8 current_speed; /* Current speed of the drive. */
96 92
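
The removed sense_data field is the counterpart of the ide_prep_sense() call added to ide-cd.c above: sense data is now gathered per failed command, by a sense request prepared up front and queued in front of the failing command, rather than cached once per device. A toy illustration of that ownership change; all names below are illustrative, not the kernel API:

#include <string.h>

struct pkt_cmd {
        unsigned char cdb[12];    /* the packet command itself */
        unsigned char sense[18];  /* fixed-format sense for THIS command */
        int have_sense;
};

/* On failure, a REQUEST SENSE runs first and its payload is stored
 * with the failing command, so a later command cannot clobber it. */
static void attach_sense(struct pkt_cmd *cmd,
                         const unsigned char *payload, size_t n)
{
        if (n > sizeof(cmd->sense))
                n = sizeof(cmd->sense);
        memset(cmd->sense, 0, sizeof(cmd->sense));
        memcpy(cmd->sense, payload, n);
        cmd->have_sense = 1;
}
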
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index a9fbe2c31210..ad18e14043c5 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
82 sector_t block) 82 sector_t block)
83{ 83{
84 ide_hwif_t *hwif = drive->hwif; 84 ide_hwif_t *hwif = drive->hwif;
85 u16 nsectors = (u16)rq->nr_sectors; 85 u16 nsectors = (u16)blk_rq_sectors(rq);
86 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); 86 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
87 u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 87 u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
88 struct ide_cmd cmd; 88 struct ide_cmd cmd;
@@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
90 ide_startstop_t rc; 90 ide_startstop_t rc;
91 91
92 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) { 92 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
93 if (block + rq->nr_sectors > 1ULL << 28) 93 if (block + blk_rq_sectors(rq) > 1ULL << 28)
94 dma = 0; 94 dma = 0;
95 else 95 else
96 lba48 = 0; 96 lba48 = 0;
@@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
195 195
196 ledtrig_ide_activity(); 196 ledtrig_ide_activity();
197 197
198 pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n", 198 pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
199 drive->name, rq_data_dir(rq) == READ ? "read" : "writ", 199 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
200 (unsigned long long)block, rq->nr_sectors, 200 (unsigned long long)block, blk_rq_sectors(rq),
201 (unsigned long)rq->buffer); 201 (unsigned long)rq->buffer);
202 202
203 if (hwif->rw_disk) 203 if (hwif->rw_disk)
@@ -411,7 +411,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
411 cmd->protocol = ATA_PROT_NODATA; 411 cmd->protocol = ATA_PROT_NODATA;
412 412
413 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 413 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
414 rq->cmd_flags |= REQ_SOFTBARRIER;
415 rq->special = cmd; 414 rq->special = cmd;
416} 415}
417 416
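
One spot in ide-disk.c where the conversion is more than search-and-replace is the IDE_HFLAG_NO_LBA48_DMA test: the decision needs the request's total sector count, which now comes from blk_rq_sectors(). A standalone sketch of that decision logic:

#include <stdint.h>

/* For hosts that cannot DMA LBA48 commands: only a request that
 * actually ends above the 28-bit LBA boundary must give up DMA;
 * everything else keeps DMA and drops down to an LBA28 command. */
static void pick_mode(uint64_t block, unsigned int nr_sectors,
                      int *dma, int *lba48)
{
        if (block + nr_sectors > (1ULL << 28))
                *dma = 0;       /* needs LBA48, so fall back to PIO */
        else
                *lba48 = 0;     /* fits in LBA28, so DMA stays on */
}
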
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index a0b8cab1d9a6..001f68f0bb28 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
103 ide_finish_cmd(drive, cmd, stat); 103 ide_finish_cmd(drive, cmd, stat);
104 else 104 else
105 ide_complete_rq(drive, 0, 105 ide_complete_rq(drive, 0,
106 cmd->rq->nr_sectors << 9); 106 blk_rq_sectors(cmd->rq) << 9);
107 return ide_stopped; 107 return ide_stopped;
108 } 108 }
109 printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n", 109 printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
@@ -510,23 +510,11 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
510 /* 510 /*
511 * un-busy drive etc and make sure request is sane 511 * un-busy drive etc and make sure request is sane
512 */ 512 */
513
514 rq = hwif->rq; 513 rq = hwif->rq;
515 if (!rq) 514 if (rq) {
516 goto out; 515 hwif->rq = NULL;
517 516 rq->errors = 0;
518 hwif->rq = NULL; 517 }
519
520 rq->errors = 0;
521
522 if (!rq->bio)
523 goto out;
524
525 rq->sector = rq->bio->bi_sector;
526 rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
527 rq->hard_cur_sectors = rq->current_nr_sectors;
528 rq->buffer = bio_data(rq->bio);
529out:
530 return ret; 518 return ret;
531} 519}
532 520
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 2b4868d95f8b..650981758f15 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -134,13 +134,17 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
134 drive->pc = pc; 134 drive->pc = pc;
135 135
136 if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) { 136 if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
137 unsigned int done = blk_rq_bytes(drive->hwif->rq);
138
137 if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR)) 139 if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
138 ide_floppy_report_error(floppy, pc); 140 ide_floppy_report_error(floppy, pc);
141
139 /* Giving up */ 142 /* Giving up */
140 pc->error = IDE_DRV_ERROR_GENERAL; 143 pc->error = IDE_DRV_ERROR_GENERAL;
141 144
142 drive->failed_pc = NULL; 145 drive->failed_pc = NULL;
143 drive->pc_callback(drive, 0); 146 drive->pc_callback(drive, 0);
147 ide_complete_rq(drive, -EIO, done);
144 return ide_stopped; 148 return ide_stopped;
145 } 149 }
146 150
@@ -190,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
190{ 194{
191 struct ide_disk_obj *floppy = drive->driver_data; 195 struct ide_disk_obj *floppy = drive->driver_data;
192 int block = sector / floppy->bs_factor; 196 int block = sector / floppy->bs_factor;
193 int blocks = rq->nr_sectors / floppy->bs_factor; 197 int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
194 int cmd = rq_data_dir(rq); 198 int cmd = rq_data_dir(rq);
195 199
196 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks); 200 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@@ -216,16 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
216 ide_init_pc(pc); 220 ide_init_pc(pc);
217 memcpy(pc->c, rq->cmd, sizeof(pc->c)); 221 memcpy(pc->c, rq->cmd, sizeof(pc->c));
218 pc->rq = rq; 222 pc->rq = rq;
219 if (rq->data_len && rq_data_dir(rq) == WRITE) 223 if (blk_rq_bytes(rq)) {
220 pc->flags |= PC_FLAG_WRITING;
221 pc->buf = rq->data;
222 if (rq->bio)
223 pc->flags |= PC_FLAG_DMA_OK; 224 pc->flags |= PC_FLAG_DMA_OK;
224 /* 225 if (rq_data_dir(rq) == WRITE)
225 * possibly problematic, doesn't look like ide-floppy correctly 226 pc->flags |= PC_FLAG_WRITING;
226 * handled scattered requests if dma fails... 227 }
227 */ 228 /* pio will be performed by ide_pio_bytes() which handles sg fine */
228 pc->req_xfer = pc->buf_size = rq->data_len; 229 pc->buf = NULL;
230 pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
229} 231}
230 232
231static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, 233static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
@@ -257,16 +259,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
257 goto out_end; 259 goto out_end;
258 } 260 }
259 if (blk_fs_request(rq)) { 261 if (blk_fs_request(rq)) {
260 if (((long)rq->sector % floppy->bs_factor) || 262 if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
261 (rq->nr_sectors % floppy->bs_factor)) { 263 (blk_rq_sectors(rq) % floppy->bs_factor)) {
262 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", 264 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
263 drive->name); 265 drive->name);
264 goto out_end; 266 goto out_end;
265 } 267 }
266 pc = &floppy->queued_pc; 268 pc = &floppy->queued_pc;
267 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); 269 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
268 } else if (blk_special_request(rq)) { 270 } else if (blk_special_request(rq) || blk_sense_request(rq)) {
269 pc = (struct ide_atapi_pc *) rq->buffer; 271 pc = (struct ide_atapi_pc *)rq->special;
270 } else if (blk_pc_request(rq)) { 272 } else if (blk_pc_request(rq)) {
271 pc = &floppy->queued_pc; 273 pc = &floppy->queued_pc;
272 idefloppy_blockpc_cmd(floppy, pc, rq); 274 idefloppy_blockpc_cmd(floppy, pc, rq);
@@ -275,6 +277,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
275 goto out_end; 277 goto out_end;
276 } 278 }
277 279
280 ide_prep_sense(drive, rq);
281
278 memset(&cmd, 0, sizeof(cmd)); 282 memset(&cmd, 0, sizeof(cmd));
279 283
280 if (rq_data_dir(rq)) 284 if (rq_data_dir(rq))
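
With rq->data gone, the rewritten idefloppy_blockpc_cmd() derives everything from the request itself: any payload makes the command DMA-capable, the direction sets PC_FLAG_WRITING, and pc->buf stays NULL because PIO now goes through the generic scatter-gather path. A compilable model of the flag derivation; the flag names match the diff, the struct is a stand-in:

#define PC_FLAG_DMA_OK   (1 << 0)
#define PC_FLAG_WRITING  (1 << 1)

struct toy_pc { unsigned int flags; unsigned int req_xfer; };

/* derive packet-command flags from request size and direction,
 * as in the rewritten idefloppy_blockpc_cmd() */
static void blockpc_flags(struct toy_pc *pc, unsigned int bytes, int is_write)
{
        pc->flags = 0;
        if (bytes) {
                pc->flags |= PC_FLAG_DMA_OK;
                if (is_write)
                        pc->flags |= PC_FLAG_WRITING;
        }
        pc->req_xfer = bytes;   /* buf stays NULL; sg handles PIO */
}
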
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6415a2e2ba87..bba4297f2f03 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
116unsigned int ide_rq_bytes(struct request *rq) 116unsigned int ide_rq_bytes(struct request *rq)
117{ 117{
118 if (blk_pc_request(rq)) 118 if (blk_pc_request(rq))
119 return rq->data_len; 119 return blk_rq_bytes(rq);
120 else 120 else
121 return rq->hard_cur_sectors << 9; 121 return blk_rq_cur_sectors(rq) << 9;
122} 122}
123EXPORT_SYMBOL_GPL(ide_rq_bytes); 123EXPORT_SYMBOL_GPL(ide_rq_bytes);
124 124
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
133 * and complete the whole request right now 133 * and complete the whole request right now
134 */ 134 */
135 if (blk_noretry_request(rq) && error <= 0) 135 if (blk_noretry_request(rq) && error <= 0)
136 nr_bytes = rq->hard_nr_sectors << 9; 136 nr_bytes = blk_rq_sectors(rq) << 9;
137 137
138 rc = ide_end_rq(drive, rq, error, nr_bytes); 138 rc = ide_end_rq(drive, rq, error, nr_bytes);
139 if (rc == 0) 139 if (rc == 0)
@@ -248,14 +248,7 @@ void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
248 struct scatterlist *sg = hwif->sg_table; 248 struct scatterlist *sg = hwif->sg_table;
249 struct request *rq = cmd->rq; 249 struct request *rq = cmd->rq;
250 250
251 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 251 cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
252 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
253 cmd->sg_nents = 1;
254 } else if (!rq->bio) {
255 sg_init_one(sg, rq->data, rq->data_len);
256 cmd->sg_nents = 1;
257 } else
258 cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
259} 252}
260EXPORT_SYMBOL_GPL(ide_map_sg); 253EXPORT_SYMBOL_GPL(ide_map_sg);
261 254
@@ -286,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
286 279
287 if (cmd) { 280 if (cmd) {
288 if (cmd->protocol == ATA_PROT_PIO) { 281 if (cmd->protocol == ATA_PROT_PIO) {
289 ide_init_sg_cmd(cmd, rq->nr_sectors << 9); 282 ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
290 ide_map_sg(drive, cmd); 283 ide_map_sg(drive, cmd);
291 } 284 }
292 285
@@ -371,7 +364,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
371 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 364 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
372 return execute_drive_cmd(drive, rq); 365 return execute_drive_cmd(drive, rq);
373 else if (blk_pm_request(rq)) { 366 else if (blk_pm_request(rq)) {
374 struct request_pm_state *pm = rq->data; 367 struct request_pm_state *pm = rq->special;
375#ifdef DEBUG_PM 368#ifdef DEBUG_PM
376 printk("%s: start_power_step(step: %d)\n", 369 printk("%s: start_power_step(step: %d)\n",
377 drive->name, pm->pm_step); 370 drive->name, pm->pm_step);
@@ -394,7 +387,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
394 387
395 drv = *(struct ide_driver **)rq->rq_disk->private_data; 388 drv = *(struct ide_driver **)rq->rq_disk->private_data;
396 389
397 return drv->do_request(drive, rq, rq->sector); 390 return drv->do_request(drive, rq, blk_rq_pos(rq));
398 } 391 }
399 return do_special(drive); 392 return do_special(drive);
400kill_rq: 393kill_rq:
@@ -484,6 +477,9 @@ void do_ide_request(struct request_queue *q)
484 477
485 spin_unlock_irq(q->queue_lock); 478 spin_unlock_irq(q->queue_lock);
486 479
480 /* HLD do_request() callback might sleep, make sure it's okay */
481 might_sleep();
482
487 if (ide_lock_host(host, hwif)) 483 if (ide_lock_host(host, hwif))
488 goto plug_device_2; 484 goto plug_device_2;
489 485
@@ -491,10 +487,10 @@ void do_ide_request(struct request_queue *q)
491 487
492 if (!ide_lock_port(hwif)) { 488 if (!ide_lock_port(hwif)) {
493 ide_hwif_t *prev_port; 489 ide_hwif_t *prev_port;
490
491 WARN_ON_ONCE(hwif->rq);
494repeat: 492repeat:
495 prev_port = hwif->host->cur_port; 493 prev_port = hwif->host->cur_port;
496 hwif->rq = NULL;
497
498 if (drive->dev_flags & IDE_DFLAG_SLEEPING && 494 if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
499 time_after(drive->sleep, jiffies)) { 495 time_after(drive->sleep, jiffies)) {
500 ide_unlock_port(hwif); 496 ide_unlock_port(hwif);
@@ -523,7 +519,9 @@ repeat:
523 * we know that the queue isn't empty, but this can happen 519 * we know that the queue isn't empty, but this can happen
524 * if the q->prep_rq_fn() decides to kill a request 520 * if the q->prep_rq_fn() decides to kill a request
525 */ 521 */
526 rq = elv_next_request(drive->queue); 522 if (!rq)
523 rq = blk_fetch_request(drive->queue);
524
527 spin_unlock_irq(q->queue_lock); 525 spin_unlock_irq(q->queue_lock);
528 spin_lock_irq(&hwif->lock); 526 spin_lock_irq(&hwif->lock);
529 527
@@ -535,7 +533,7 @@ repeat:
535 /* 533 /*
536 * Sanity: don't accept a request that isn't a PM request 534 * Sanity: don't accept a request that isn't a PM request
537 * if we are currently power managed. This is very important as 535 * if we are currently power managed. This is very important as
538 * blk_stop_queue() doesn't prevent the elv_next_request() 536 * blk_stop_queue() doesn't prevent the blk_fetch_request()
539 * above to return us whatever is in the queue. Since we call 537 * above to return us whatever is in the queue. Since we call
540 * ide_do_request() ourselves, we end up taking requests while 538 * ide_do_request() ourselves, we end up taking requests while
541 * the queue is blocked... 539 * the queue is blocked...
@@ -559,8 +557,11 @@ repeat:
559 startstop = start_request(drive, rq); 557 startstop = start_request(drive, rq);
560 spin_lock_irq(&hwif->lock); 558 spin_lock_irq(&hwif->lock);
561 559
562 if (startstop == ide_stopped) 560 if (startstop == ide_stopped) {
561 rq = hwif->rq;
562 hwif->rq = NULL;
563 goto repeat; 563 goto repeat;
564 }
564 } else 565 } else
565 goto plug_device; 566 goto plug_device;
566out: 567out:
@@ -576,18 +577,24 @@ plug_device:
576plug_device_2: 577plug_device_2:
577 spin_lock_irq(q->queue_lock); 578 spin_lock_irq(q->queue_lock);
578 579
580 if (rq)
581 blk_requeue_request(q, rq);
579 if (!elv_queue_empty(q)) 582 if (!elv_queue_empty(q))
580 blk_plug_device(q); 583 blk_plug_device(q);
581} 584}
582 585
583static void ide_plug_device(ide_drive_t *drive) 586static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
584{ 587{
585 struct request_queue *q = drive->queue; 588 struct request_queue *q = drive->queue;
586 unsigned long flags; 589 unsigned long flags;
587 590
588 spin_lock_irqsave(q->queue_lock, flags); 591 spin_lock_irqsave(q->queue_lock, flags);
592
593 if (rq)
594 blk_requeue_request(q, rq);
589 if (!elv_queue_empty(q)) 595 if (!elv_queue_empty(q))
590 blk_plug_device(q); 596 blk_plug_device(q);
597
591 spin_unlock_irqrestore(q->queue_lock, flags); 598 spin_unlock_irqrestore(q->queue_lock, flags);
592} 599}
593 600
@@ -636,6 +643,7 @@ void ide_timer_expiry (unsigned long data)
636 unsigned long flags; 643 unsigned long flags;
637 int wait = -1; 644 int wait = -1;
638 int plug_device = 0; 645 int plug_device = 0;
646 struct request *uninitialized_var(rq_in_flight);
639 647
640 spin_lock_irqsave(&hwif->lock, flags); 648 spin_lock_irqsave(&hwif->lock, flags);
641 649
@@ -697,6 +705,8 @@ void ide_timer_expiry (unsigned long data)
697 spin_lock_irq(&hwif->lock); 705 spin_lock_irq(&hwif->lock);
698 enable_irq(hwif->irq); 706 enable_irq(hwif->irq);
699 if (startstop == ide_stopped && hwif->polling == 0) { 707 if (startstop == ide_stopped && hwif->polling == 0) {
708 rq_in_flight = hwif->rq;
709 hwif->rq = NULL;
700 ide_unlock_port(hwif); 710 ide_unlock_port(hwif);
701 plug_device = 1; 711 plug_device = 1;
702 } 712 }
@@ -705,7 +715,7 @@ void ide_timer_expiry (unsigned long data)
705 715
706 if (plug_device) { 716 if (plug_device) {
707 ide_unlock_host(hwif->host); 717 ide_unlock_host(hwif->host);
708 ide_plug_device(drive); 718 ide_requeue_and_plug(drive, rq_in_flight);
709 } 719 }
710} 720}
711 721
@@ -791,6 +801,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
791 ide_startstop_t startstop; 801 ide_startstop_t startstop;
792 irqreturn_t irq_ret = IRQ_NONE; 802 irqreturn_t irq_ret = IRQ_NONE;
793 int plug_device = 0; 803 int plug_device = 0;
804 struct request *uninitialized_var(rq_in_flight);
794 805
795 if (host->host_flags & IDE_HFLAG_SERIALIZE) { 806 if (host->host_flags & IDE_HFLAG_SERIALIZE) {
796 if (hwif != host->cur_port) 807 if (hwif != host->cur_port)
@@ -870,6 +881,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
870 */ 881 */
871 if (startstop == ide_stopped && hwif->polling == 0) { 882 if (startstop == ide_stopped && hwif->polling == 0) {
872 BUG_ON(hwif->handler); 883 BUG_ON(hwif->handler);
884 rq_in_flight = hwif->rq;
885 hwif->rq = NULL;
873 ide_unlock_port(hwif); 886 ide_unlock_port(hwif);
874 plug_device = 1; 887 plug_device = 1;
875 } 888 }
@@ -879,7 +892,7 @@ out:
879out_early: 892out_early:
880 if (plug_device) { 893 if (plug_device) {
881 ide_unlock_host(hwif->host); 894 ide_unlock_host(hwif->host);
882 ide_plug_device(drive); 895 ide_requeue_and_plug(drive, rq_in_flight);
883 } 896 }
884 897
885 return irq_ret; 898 return irq_ret;
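
The ide-io.c changes implement a new ownership contract: blk_fetch_request() takes the request off the queue, the driver parks it in hwif->rq while it is in flight, and on a timeout or IRQ-side failure the request is handed back explicitly with blk_requeue_request() (the new ide_requeue_and_plug()). A runnable userspace model of that contract, with a toy list standing in for the request queue:

#include <stdio.h>

/* userspace model of fetch/requeue request ownership; not kernel code */
struct req { int id; struct req *next; };
struct queue { struct req *head; };

static struct req *fetch_request(struct queue *q)  /* cf. blk_fetch_request() */
{
        struct req *rq = q->head;
        if (rq)
                q->head = rq->next;  /* queue no longer owns rq */
        return rq;
}

static void requeue_request(struct queue *q, struct req *rq) /* cf. blk_requeue_request() */
{
        rq->next = q->head;          /* give ownership back to the queue */
        q->head = rq;
}

int main(void)
{
        struct req r = { .id = 1, .next = NULL };
        struct queue q = { .head = &r };

        struct req *in_flight = fetch_request(&q); /* driver owns it now */
        /* command times out: hand it back instead of leaking it */
        requeue_request(&q, in_flight);
        printf("head id after requeue: %d\n", q.head->id);
        return 0;
}
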
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index c1c25ebbaa1f..5991b23793f2 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -231,7 +231,6 @@ static int generic_drive_reset(ide_drive_t *drive)
231 rq->cmd_type = REQ_TYPE_SPECIAL; 231 rq->cmd_type = REQ_TYPE_SPECIAL;
232 rq->cmd_len = 1; 232 rq->cmd_len = 1;
233 rq->cmd[0] = REQ_DRIVE_RESET; 233 rq->cmd[0] = REQ_DRIVE_RESET;
234 rq->cmd_flags |= REQ_SOFTBARRIER;
235 if (blk_execute_rq(drive->queue, NULL, rq, 1)) 234 if (blk_execute_rq(drive->queue, NULL, rq, 1))
236 ret = rq->errors; 235 ret = rq->errors;
237 blk_put_request(rq); 236 blk_put_request(rq);
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 2148df836ce7..e386a32dc9ba 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -96,7 +96,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
96 96
97 if (rq) 97 if (rq)
98 printk(KERN_CONT ", sector=%llu", 98 printk(KERN_CONT ", sector=%llu",
99 (unsigned long long)rq->sector); 99 (unsigned long long)blk_rq_pos(rq));
100 } 100 }
101 printk(KERN_CONT "\n"); 101 printk(KERN_CONT "\n");
102} 102}
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 310d03f2b5b7..a914023d6d03 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -24,11 +24,8 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
24 start_queue = 1; 24 start_queue = 1;
25 spin_unlock_irq(&hwif->lock); 25 spin_unlock_irq(&hwif->lock);
26 26
27 if (start_queue) { 27 if (start_queue)
28 spin_lock_irq(q->queue_lock); 28 blk_run_queue(q);
29 blk_start_queueing(q);
30 spin_unlock_irq(q->queue_lock);
31 }
32 return; 29 return;
33 } 30 }
34 spin_unlock_irq(&hwif->lock); 31 spin_unlock_irq(&hwif->lock);
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 0d8a151c0a01..ba1488bd8430 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -7,7 +7,6 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
7 ide_hwif_t *hwif = drive->hwif; 7 ide_hwif_t *hwif = drive->hwif;
8 struct request *rq; 8 struct request *rq;
9 struct request_pm_state rqpm; 9 struct request_pm_state rqpm;
10 struct ide_cmd cmd;
11 int ret; 10 int ret;
12 11
13 /* call ACPI _GTM only once */ 12 /* call ACPI _GTM only once */
@@ -15,11 +14,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
15 ide_acpi_get_timing(hwif); 14 ide_acpi_get_timing(hwif);
16 15
17 memset(&rqpm, 0, sizeof(rqpm)); 16 memset(&rqpm, 0, sizeof(rqpm));
18 memset(&cmd, 0, sizeof(cmd));
19 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 17 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
20 rq->cmd_type = REQ_TYPE_PM_SUSPEND; 18 rq->cmd_type = REQ_TYPE_PM_SUSPEND;
21 rq->special = &cmd; 19 rq->special = &rqpm;
22 rq->data = &rqpm;
23 rqpm.pm_step = IDE_PM_START_SUSPEND; 20 rqpm.pm_step = IDE_PM_START_SUSPEND;
24 if (mesg.event == PM_EVENT_PRETHAW) 21 if (mesg.event == PM_EVENT_PRETHAW)
25 mesg.event = PM_EVENT_FREEZE; 22 mesg.event = PM_EVENT_FREEZE;
@@ -41,7 +38,6 @@ int generic_ide_resume(struct device *dev)
41 ide_hwif_t *hwif = drive->hwif; 38 ide_hwif_t *hwif = drive->hwif;
42 struct request *rq; 39 struct request *rq;
43 struct request_pm_state rqpm; 40 struct request_pm_state rqpm;
44 struct ide_cmd cmd;
45 int err; 41 int err;
46 42
47 /* call ACPI _PS0 / _STM only once */ 43 /* call ACPI _PS0 / _STM only once */
@@ -53,12 +49,10 @@ int generic_ide_resume(struct device *dev)
53 ide_acpi_exec_tfs(drive); 49 ide_acpi_exec_tfs(drive);
54 50
55 memset(&rqpm, 0, sizeof(rqpm)); 51 memset(&rqpm, 0, sizeof(rqpm));
56 memset(&cmd, 0, sizeof(cmd));
57 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 52 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
58 rq->cmd_type = REQ_TYPE_PM_RESUME; 53 rq->cmd_type = REQ_TYPE_PM_RESUME;
59 rq->cmd_flags |= REQ_PREEMPT; 54 rq->cmd_flags |= REQ_PREEMPT;
60 rq->special = &cmd; 55 rq->special = &rqpm;
61 rq->data = &rqpm;
62 rqpm.pm_step = IDE_PM_START_RESUME; 56 rqpm.pm_step = IDE_PM_START_RESUME;
63 rqpm.pm_state = PM_EVENT_ON; 57 rqpm.pm_state = PM_EVENT_ON;
64 58
@@ -77,7 +71,7 @@ int generic_ide_resume(struct device *dev)
77 71
78void ide_complete_power_step(ide_drive_t *drive, struct request *rq) 72void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
79{ 73{
80 struct request_pm_state *pm = rq->data; 74 struct request_pm_state *pm = rq->special;
81 75
82#ifdef DEBUG_PM 76#ifdef DEBUG_PM
83 printk(KERN_INFO "%s: complete_power_step(step: %d)\n", 77 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
@@ -107,10 +101,8 @@ void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
107 101
108ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) 102ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
109{ 103{
110 struct request_pm_state *pm = rq->data; 104 struct request_pm_state *pm = rq->special;
111 struct ide_cmd *cmd = rq->special; 105 struct ide_cmd cmd = { };
112
113 memset(cmd, 0, sizeof(*cmd));
114 106
115 switch (pm->pm_step) { 107 switch (pm->pm_step) {
116 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */ 108 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
@@ -123,12 +115,12 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
123 return ide_stopped; 115 return ide_stopped;
124 } 116 }
125 if (ata_id_flush_ext_enabled(drive->id)) 117 if (ata_id_flush_ext_enabled(drive->id))
126 cmd->tf.command = ATA_CMD_FLUSH_EXT; 118 cmd.tf.command = ATA_CMD_FLUSH_EXT;
127 else 119 else
128 cmd->tf.command = ATA_CMD_FLUSH; 120 cmd.tf.command = ATA_CMD_FLUSH;
129 goto out_do_tf; 121 goto out_do_tf;
130 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */ 122 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
131 cmd->tf.command = ATA_CMD_STANDBYNOW1; 123 cmd.tf.command = ATA_CMD_STANDBYNOW1;
132 goto out_do_tf; 124 goto out_do_tf;
133 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */ 125 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
134 ide_set_max_pio(drive); 126 ide_set_max_pio(drive);
@@ -141,7 +133,7 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
141 ide_complete_power_step(drive, rq); 133 ide_complete_power_step(drive, rq);
142 return ide_stopped; 134 return ide_stopped;
143 case IDE_PM_IDLE: /* Resume step 2 (idle) */ 135 case IDE_PM_IDLE: /* Resume step 2 (idle) */
144 cmd->tf.command = ATA_CMD_IDLEIMMEDIATE; 136 cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
145 goto out_do_tf; 137 goto out_do_tf;
146 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */ 138 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
147 /* 139 /*
@@ -163,11 +155,11 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
163 return ide_stopped; 155 return ide_stopped;
164 156
165out_do_tf: 157out_do_tf:
166 cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; 158 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
167 cmd->valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; 159 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
168 cmd->protocol = ATA_PROT_NODATA; 160 cmd.protocol = ATA_PROT_NODATA;
169 161
170 return do_rw_taskfile(drive, cmd); 162 return do_rw_taskfile(drive, &cmd);
171} 163}
172 164
173/** 165/**
@@ -181,7 +173,7 @@ out_do_tf:
181void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) 173void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
182{ 174{
183 struct request_queue *q = drive->queue; 175 struct request_queue *q = drive->queue;
184 struct request_pm_state *pm = rq->data; 176 struct request_pm_state *pm = rq->special;
185 unsigned long flags; 177 unsigned long flags;
186 178
187 ide_complete_power_step(drive, rq); 179 ide_complete_power_step(drive, rq);
@@ -207,7 +199,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
207 199
208void ide_check_pm_state(ide_drive_t *drive, struct request *rq) 200void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
209{ 201{
210 struct request_pm_state *pm = rq->data; 202 struct request_pm_state *pm = rq->special;
211 203
212 if (blk_pm_suspend_request(rq) && 204 if (blk_pm_suspend_request(rq) &&
213 pm->pm_step == IDE_PM_START_SUSPEND) 205 pm->pm_step == IDE_PM_START_SUSPEND)
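
In ide-pm.c the PM state moves from the disappearing rq->data into rq->special, and ide_start_power_step() now builds its taskfile command in a stack-local ide_cmd instead of one preallocated by the suspend/resume caller. A toy model of the per-step construction; the opcode values are the standard ATA ones, the structs are stand-ins:

/* toy model of ide_start_power_step() building its command on the stack */
enum { ATA_CMD_STANDBYNOW1 = 0xE0, ATA_CMD_IDLEIMMEDIATE = 0xE1,
       ATA_CMD_FLUSH = 0xE7 };

enum pm_step { PM_FLUSH_CACHE, PM_STANDBY, PM_IDLE };

struct toy_cmd { unsigned char tf_command; };

static void start_power_step(enum pm_step step, struct toy_cmd *out)
{
        struct toy_cmd cmd = { 0 };     /* fresh on every step */

        switch (step) {
        case PM_FLUSH_CACHE:
                cmd.tf_command = ATA_CMD_FLUSH;
                break;
        case PM_STANDBY:
                cmd.tf_command = ATA_CMD_STANDBYNOW1;
                break;
        case PM_IDLE:
                cmd.tf_command = ATA_CMD_IDLEIMMEDIATE;
                break;
        }
        *out = cmd;     /* issued immediately; nothing outlives this frame */
}
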
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 3a53e0834cf7..683ff37d4079 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -131,13 +131,6 @@ enum {
131 IDETAPE_DIR_WRITE = (1 << 2), 131 IDETAPE_DIR_WRITE = (1 << 2),
132}; 132};
133 133
134struct idetape_bh {
135 u32 b_size;
136 atomic_t b_count;
137 struct idetape_bh *b_reqnext;
138 char *b_data;
139};
140
141/* Tape door status */ 134/* Tape door status */
142#define DOOR_UNLOCKED 0 135#define DOOR_UNLOCKED 0
143#define DOOR_LOCKED 1 136#define DOOR_LOCKED 1
@@ -219,18 +212,12 @@ typedef struct ide_tape_obj {
219 212
220 /* Data buffer size chosen based on the tape's recommendation */ 213 /* Data buffer size chosen based on the tape's recommendation */
221 int buffer_size; 214 int buffer_size;
222 /* merge buffer */ 215 /* Staging buffer of buffer_size bytes */
223 struct idetape_bh *merge_bh; 216 void *buf;
224 /* size of the merge buffer */ 217 /* The read/write cursor */
225 int merge_bh_size; 218 void *cur;
226 /* pointer to current buffer head within the merge buffer */ 219 /* The number of valid bytes in buf */
227 struct idetape_bh *bh; 220 size_t valid;
228 char *b_data;
229 int b_count;
230
231 int pages_per_buffer;
232 /* Wasted space in each stage */
233 int excess_bh_size;
234 221
235 /* Measures average tape speed */ 222 /* Measures average tape speed */
236 unsigned long avg_time; 223 unsigned long avg_time;
@@ -297,84 +284,6 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
297 return tape; 284 return tape;
298} 285}
299 286
300static int idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
301 unsigned int bcount)
302{
303 struct idetape_bh *bh = pc->bh;
304 int count;
305
306 while (bcount) {
307 if (bh == NULL)
308 break;
309 count = min(
310 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
311 bcount);
312 drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
313 atomic_read(&bh->b_count), count);
314 bcount -= count;
315 atomic_add(count, &bh->b_count);
316 if (atomic_read(&bh->b_count) == bh->b_size) {
317 bh = bh->b_reqnext;
318 if (bh)
319 atomic_set(&bh->b_count, 0);
320 }
321 }
322
323 pc->bh = bh;
324
325 return bcount;
326}
327
328static int idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
329 unsigned int bcount)
330{
331 struct idetape_bh *bh = pc->bh;
332 int count;
333
334 while (bcount) {
335 if (bh == NULL)
336 break;
337 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
338 drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
339 bcount -= count;
340 pc->b_data += count;
341 pc->b_count -= count;
342 if (!pc->b_count) {
343 bh = bh->b_reqnext;
344 pc->bh = bh;
345 if (bh) {
346 pc->b_data = bh->b_data;
347 pc->b_count = atomic_read(&bh->b_count);
348 }
349 }
350 }
351
352 return bcount;
353}
354
355static void idetape_update_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc)
356{
357 struct idetape_bh *bh = pc->bh;
358 int count;
359 unsigned int bcount = pc->xferred;
360
361 if (pc->flags & PC_FLAG_WRITING)
362 return;
363 while (bcount) {
364 if (bh == NULL) {
365 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
366 __func__);
367 return;
368 }
369 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
370 atomic_set(&bh->b_count, count);
371 if (atomic_read(&bh->b_count) == bh->b_size)
372 bh = bh->b_reqnext;
373 bcount -= count;
374 }
375 pc->bh = bh;
376}
377
378/* 287/*
379 * called on each failed packet command retry to analyze the request sense. We 288 * called on each failed packet command retry to analyze the request sense. We
380 * currently do not utilize this information. 289 * currently do not utilize this information.
@@ -392,12 +301,10 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
392 pc->c[0], tape->sense_key, tape->asc, tape->ascq); 301 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
393 302
394 /* Correct pc->xferred by asking the tape. */ 303 /* Correct pc->xferred by asking the tape. */
395 if (pc->flags & PC_FLAG_DMA_ERROR) { 304 if (pc->flags & PC_FLAG_DMA_ERROR)
396 pc->xferred = pc->req_xfer - 305 pc->xferred = pc->req_xfer -
397 tape->blk_size * 306 tape->blk_size *
398 get_unaligned_be32(&sense[3]); 307 get_unaligned_be32(&sense[3]);
399 idetape_update_buffers(drive, pc);
400 }
401 308
402 /* 309 /*
403 * If error was the result of a zero-length read or write command, 310 * If error was the result of a zero-length read or write command,
@@ -436,29 +343,6 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
436 } 343 }
437} 344}
438 345
439/* Free data buffers completely. */
440static void ide_tape_kfree_buffer(idetape_tape_t *tape)
441{
442 struct idetape_bh *prev_bh, *bh = tape->merge_bh;
443
444 while (bh) {
445 u32 size = bh->b_size;
446
447 while (size) {
448 unsigned int order = fls(size >> PAGE_SHIFT)-1;
449
450 if (bh->b_data)
451 free_pages((unsigned long)bh->b_data, order);
452
453 size &= (order-1);
454 bh->b_data += (1 << order) * PAGE_SIZE;
455 }
456 prev_bh = bh;
457 bh = bh->b_reqnext;
458 kfree(prev_bh);
459 }
460}
461
462static void ide_tape_handle_dsc(ide_drive_t *); 346static void ide_tape_handle_dsc(ide_drive_t *);
463 347
464static int ide_tape_callback(ide_drive_t *drive, int dsc) 348static int ide_tape_callback(ide_drive_t *drive, int dsc)
@@ -496,7 +380,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
496 } 380 }
497 381
498 tape->first_frame += blocks; 382 tape->first_frame += blocks;
499 rq->current_nr_sectors -= blocks; 383 rq->resid_len -= blocks * tape->blk_size;
500 384
501 if (pc->error) { 385 if (pc->error) {
502 uptodate = 0; 386 uptodate = 0;
@@ -558,19 +442,6 @@ static void ide_tape_handle_dsc(ide_drive_t *drive)
558 idetape_postpone_request(drive); 442 idetape_postpone_request(drive);
559} 443}
560 444
561static int ide_tape_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
562 unsigned int bcount, int write)
563{
564 unsigned int bleft;
565
566 if (write)
567 bleft = idetape_output_buffers(drive, pc, bcount);
568 else
569 bleft = idetape_input_buffers(drive, pc, bcount);
570
571 return bcount - bleft;
572}
573
574/* 445/*
575 * Packet Command Interface 446 * Packet Command Interface
576 * 447 *
@@ -622,6 +493,8 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
622 493
623 if (pc->retries > IDETAPE_MAX_PC_RETRIES || 494 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
624 (pc->flags & PC_FLAG_ABORT)) { 495 (pc->flags & PC_FLAG_ABORT)) {
496 unsigned int done = blk_rq_bytes(drive->hwif->rq);
497
625 /* 498 /*
626 * We will "abort" retrying a packet command in case legitimate 499 * We will "abort" retrying a packet command in case legitimate
627 * error code was received (crossing a filemark, or end of the 500 * error code was received (crossing a filemark, or end of the
@@ -641,8 +514,10 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
641 /* Giving up */ 514 /* Giving up */
642 pc->error = IDE_DRV_ERROR_GENERAL; 515 pc->error = IDE_DRV_ERROR_GENERAL;
643 } 516 }
517
644 drive->failed_pc = NULL; 518 drive->failed_pc = NULL;
645 drive->pc_callback(drive, 0); 519 drive->pc_callback(drive, 0);
520 ide_complete_rq(drive, -EIO, done);
646 return ide_stopped; 521 return ide_stopped;
647 } 522 }
648 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]); 523 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -695,7 +570,7 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
695 printk(KERN_ERR "ide-tape: %s: I/O error, ", 570 printk(KERN_ERR "ide-tape: %s: I/O error, ",
696 tape->name); 571 tape->name);
697 /* Retry operation */ 572 /* Retry operation */
698 ide_retry_pc(drive, tape->disk); 573 ide_retry_pc(drive);
699 return ide_stopped; 574 return ide_stopped;
700 } 575 }
701 pc->error = 0; 576 pc->error = 0;
@@ -711,27 +586,22 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
711 struct ide_atapi_pc *pc, struct request *rq, 586 struct ide_atapi_pc *pc, struct request *rq,
712 u8 opcode) 587 u8 opcode)
713{ 588{
714 struct idetape_bh *bh = (struct idetape_bh *)rq->special; 589 unsigned int length = blk_rq_sectors(rq);
715 unsigned int length = rq->current_nr_sectors;
716 590
717 ide_init_pc(pc); 591 ide_init_pc(pc);
718 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); 592 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
719 pc->c[1] = 1; 593 pc->c[1] = 1;
720 pc->bh = bh;
721 pc->buf = NULL; 594 pc->buf = NULL;
722 pc->buf_size = length * tape->blk_size; 595 pc->buf_size = length * tape->blk_size;
723 pc->req_xfer = pc->buf_size; 596 pc->req_xfer = pc->buf_size;
724 if (pc->req_xfer == tape->buffer_size) 597 if (pc->req_xfer == tape->buffer_size)
725 pc->flags |= PC_FLAG_DMA_OK; 598 pc->flags |= PC_FLAG_DMA_OK;
726 599
727 if (opcode == READ_6) { 600 if (opcode == READ_6)
728 pc->c[0] = READ_6; 601 pc->c[0] = READ_6;
729 atomic_set(&bh->b_count, 0); 602 else if (opcode == WRITE_6) {
730 } else if (opcode == WRITE_6) {
731 pc->c[0] = WRITE_6; 603 pc->c[0] = WRITE_6;
732 pc->flags |= PC_FLAG_WRITING; 604 pc->flags |= PC_FLAG_WRITING;
733 pc->b_data = bh->b_data;
734 pc->b_count = atomic_read(&bh->b_count);
735 } 605 }
736 606
737 memcpy(rq->cmd, pc->c, 12); 607 memcpy(rq->cmd, pc->c, 12);
@@ -747,12 +617,10 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
747 struct ide_cmd cmd; 617 struct ide_cmd cmd;
748 u8 stat; 618 u8 stat;
749 619
750 debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu," 620 debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n",
751 " current_nr_sectors: %u\n", 621 (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
752 (unsigned long long)rq->sector, rq->nr_sectors,
753 rq->current_nr_sectors);
754 622
755 if (!blk_special_request(rq)) { 623 if (!(blk_special_request(rq) || blk_sense_request(rq))) {
756 /* We do not support buffer cache originated requests. */ 624 /* We do not support buffer cache originated requests. */
757 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in " 625 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
758 "request queue (%d)\n", drive->name, rq->cmd_type); 626 "request queue (%d)\n", drive->name, rq->cmd_type);
@@ -828,7 +696,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
828 goto out; 696 goto out;
829 } 697 }
830 if (rq->cmd[13] & REQ_IDETAPE_PC1) { 698 if (rq->cmd[13] & REQ_IDETAPE_PC1) {
831 pc = (struct ide_atapi_pc *) rq->buffer; 699 pc = (struct ide_atapi_pc *)rq->special;
832 rq->cmd[13] &= ~(REQ_IDETAPE_PC1); 700 rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
833 rq->cmd[13] |= REQ_IDETAPE_PC2; 701 rq->cmd[13] |= REQ_IDETAPE_PC2;
834 goto out; 702 goto out;
@@ -840,6 +708,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
840 BUG(); 708 BUG();
841 709
842out: 710out:
711 /* prepare sense request for this command */
712 ide_prep_sense(drive, rq);
713
843 memset(&cmd, 0, sizeof(cmd)); 714 memset(&cmd, 0, sizeof(cmd));
844 715
845 if (rq_data_dir(rq)) 716 if (rq_data_dir(rq))
@@ -847,167 +718,10 @@ out:
847 718
848 cmd.rq = rq; 719 cmd.rq = rq;
849 720
850 return ide_tape_issue_pc(drive, &cmd, pc); 721 ide_init_sg_cmd(&cmd, pc->req_xfer);
851} 722 ide_map_sg(drive, &cmd);
852
853/*
854 * The function below uses __get_free_pages to allocate a data buffer of size
855 * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
856 * much as possible.
857 *
858 * It returns a pointer to the newly allocated buffer, or NULL in case of
859 * failure.
860 */
861static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
862 int full, int clear)
863{
864 struct idetape_bh *prev_bh, *bh, *merge_bh;
865 int pages = tape->pages_per_buffer;
866 unsigned int order, b_allocd;
867 char *b_data = NULL;
868
869 merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
870 bh = merge_bh;
871 if (bh == NULL)
872 goto abort;
873
874 order = fls(pages) - 1;
875 bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
876 if (!bh->b_data)
877 goto abort;
878 b_allocd = (1 << order) * PAGE_SIZE;
879 pages &= (order-1);
880
881 if (clear)
882 memset(bh->b_data, 0, b_allocd);
883 bh->b_reqnext = NULL;
884 bh->b_size = b_allocd;
885 atomic_set(&bh->b_count, full ? bh->b_size : 0);
886
887 while (pages) {
888 order = fls(pages) - 1;
889 b_data = (char *) __get_free_pages(GFP_KERNEL, order);
890 if (!b_data)
891 goto abort;
892 b_allocd = (1 << order) * PAGE_SIZE;
893
894 if (clear)
895 memset(b_data, 0, b_allocd);
896
897 /* newly allocated page frames below buffer header or ...*/
898 if (bh->b_data == b_data + b_allocd) {
899 bh->b_size += b_allocd;
900 bh->b_data -= b_allocd;
901 if (full)
902 atomic_add(b_allocd, &bh->b_count);
903 continue;
904 }
905 /* they are above the header */
906 if (b_data == bh->b_data + bh->b_size) {
907 bh->b_size += b_allocd;
908 if (full)
909 atomic_add(b_allocd, &bh->b_count);
910 continue;
911 }
912 prev_bh = bh;
913 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
914 if (!bh) {
915 free_pages((unsigned long) b_data, order);
916 goto abort;
917 }
918 bh->b_reqnext = NULL;
919 bh->b_data = b_data;
920 bh->b_size = b_allocd;
921 atomic_set(&bh->b_count, full ? bh->b_size : 0);
922 prev_bh->b_reqnext = bh;
923
924 pages &= (order-1);
925 }
926
927 bh->b_size -= tape->excess_bh_size;
928 if (full)
929 atomic_sub(tape->excess_bh_size, &bh->b_count);
930 return merge_bh;
931abort:
932 ide_tape_kfree_buffer(tape);
933 return NULL;
934}
935 723
936static int idetape_copy_stage_from_user(idetape_tape_t *tape, 724 return ide_tape_issue_pc(drive, &cmd, pc);
937 const char __user *buf, int n)
938{
939 struct idetape_bh *bh = tape->bh;
940 int count;
941 int ret = 0;
942
943 while (n) {
944 if (bh == NULL) {
945 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
946 __func__);
947 return 1;
948 }
949 count = min((unsigned int)
950 (bh->b_size - atomic_read(&bh->b_count)),
951 (unsigned int)n);
952 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
953 count))
954 ret = 1;
955 n -= count;
956 atomic_add(count, &bh->b_count);
957 buf += count;
958 if (atomic_read(&bh->b_count) == bh->b_size) {
959 bh = bh->b_reqnext;
960 if (bh)
961 atomic_set(&bh->b_count, 0);
962 }
963 }
964 tape->bh = bh;
965 return ret;
966}
967
968static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
969 int n)
970{
971 struct idetape_bh *bh = tape->bh;
972 int count;
973 int ret = 0;
974
975 while (n) {
976 if (bh == NULL) {
977 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
978 __func__);
979 return 1;
980 }
981 count = min(tape->b_count, n);
982 if (copy_to_user(buf, tape->b_data, count))
983 ret = 1;
984 n -= count;
985 tape->b_data += count;
986 tape->b_count -= count;
987 buf += count;
988 if (!tape->b_count) {
989 bh = bh->b_reqnext;
990 tape->bh = bh;
991 if (bh) {
992 tape->b_data = bh->b_data;
993 tape->b_count = atomic_read(&bh->b_count);
994 }
995 }
996 }
997 return ret;
998}
999
1000static void idetape_init_merge_buffer(idetape_tape_t *tape)
1001{
1002 struct idetape_bh *bh = tape->merge_bh;
1003 tape->bh = tape->merge_bh;
1004
1005 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1006 atomic_set(&bh->b_count, 0);
1007 else {
1008 tape->b_data = bh->b_data;
1009 tape->b_count = atomic_read(&bh->b_count);
1010 }
1011} 725}
1012 726
1013/* 727/*
@@ -1107,10 +821,10 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
1107 return; 821 return;
1108 822
1109 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags); 823 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
1110 tape->merge_bh_size = 0; 824 tape->valid = 0;
1111 if (tape->merge_bh != NULL) { 825 if (tape->buf != NULL) {
1112 ide_tape_kfree_buffer(tape); 826 kfree(tape->buf);
1113 tape->merge_bh = NULL; 827 tape->buf = NULL;
1114 } 828 }
1115 829
1116 tape->chrdev_dir = IDETAPE_DIR_NONE; 830 tape->chrdev_dir = IDETAPE_DIR_NONE;
@@ -1164,36 +878,43 @@ static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
1164 * Generate a read/write request for the block device interface and wait for it 878 * Generate a read/write request for the block device interface and wait for it
1165 * to be serviced. 879 * to be serviced.
1166 */ 880 */
1167static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, 881static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
1168 struct idetape_bh *bh)
1169{ 882{
1170 idetape_tape_t *tape = drive->driver_data; 883 idetape_tape_t *tape = drive->driver_data;
1171 struct request *rq; 884 struct request *rq;
1172 int ret, errors; 885 int ret;
1173 886
1174 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd); 887 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
888 BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
889 BUG_ON(size < 0 || size % tape->blk_size);
1175 890
1176 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 891 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1177 rq->cmd_type = REQ_TYPE_SPECIAL; 892 rq->cmd_type = REQ_TYPE_SPECIAL;
1178 rq->cmd[13] = cmd; 893 rq->cmd[13] = cmd;
1179 rq->rq_disk = tape->disk; 894 rq->rq_disk = tape->disk;
1180 rq->special = (void *)bh;
1181 rq->sector = tape->first_frame;
1182 rq->nr_sectors = blocks;
1183 rq->current_nr_sectors = blocks;
1184 blk_execute_rq(drive->queue, tape->disk, rq, 0);
1185 895
1186 errors = rq->errors; 896 if (size) {
1187 ret = tape->blk_size * (blocks - rq->current_nr_sectors); 897 ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
1188 blk_put_request(rq); 898 __GFP_WAIT);
899 if (ret)
900 goto out_put;
901 }
1189 902
1190 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0) 903 blk_execute_rq(drive->queue, tape->disk, rq, 0);
1191 return 0;
1192 904
1193 if (tape->merge_bh) 905 /* calculate the number of transferred bytes and update buffer state */
1194 idetape_init_merge_buffer(tape); 906 size -= rq->resid_len;
1195 if (errors == IDE_DRV_ERROR_GENERAL) 907 tape->cur = tape->buf;
1196 return -EIO; 908 if (cmd == REQ_IDETAPE_READ)
909 tape->valid = size;
910 else
911 tape->valid = 0;
912
913 ret = size;
914 if (rq->errors == IDE_DRV_ERROR_GENERAL)
915 ret = -EIO;
916out_put:
917 blk_put_request(rq);
1197 return ret; 918 return ret;
1198} 919}
1199 920
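
idetape_queue_rw_tail() now hands the block layer a contiguous kernel buffer via blk_rq_map_kern() and, after completion, derives the transfer size from rq->resid_len, the count of bytes that were not transferred. A self-contained sketch of that accounting, with a stub standing in for blk_execute_rq():

#include <stddef.h>

struct toy_rq { unsigned int resid_len; int errors; };

/* stub for blk_execute_rq(): pretend the whole buffer moved */
static void exec_rq(struct toy_rq *rq, void *buf, size_t size)
{
        (void)buf; (void)size;
        rq->resid_len = 0;      /* completion sets bytes NOT transferred */
        rq->errors = 0;
}

/* mirror of the new accounting: requested size minus residual */
static long queue_rw_tail(struct toy_rq *rq, void *buf, size_t size)
{
        exec_rq(rq, buf, size);
        size -= rq->resid_len;  /* bytes actually moved */
        return rq->errors ? -1 : (long)size;
}
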
@@ -1230,153 +951,87 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
1230 pc->flags |= PC_FLAG_WAIT_FOR_DSC; 951 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1231} 952}
1232 953
1233/* Queue up a character device originated write request. */
1234static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
1235{
1236 idetape_tape_t *tape = drive->driver_data;
1237
1238 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
1239
1240 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
1241 blocks, tape->merge_bh);
1242}
1243
1244static void ide_tape_flush_merge_buffer(ide_drive_t *drive) 954static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
1245{ 955{
1246 idetape_tape_t *tape = drive->driver_data; 956 idetape_tape_t *tape = drive->driver_data;
1247 int blocks, min;
1248 struct idetape_bh *bh;
1249 957
1250 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 958 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
1251 printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer" 959 printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
1252 " but we are not writing.\n"); 960 " but we are not writing.\n");
1253 return; 961 return;
1254 } 962 }
1255 if (tape->merge_bh_size > tape->buffer_size) { 963 if (tape->buf) {
1256 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n"); 964 size_t aligned = roundup(tape->valid, tape->blk_size);
1257 tape->merge_bh_size = tape->buffer_size; 965
1258 } 966 memset(tape->cur, 0, aligned - tape->valid);
1259 if (tape->merge_bh_size) { 967 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, aligned);
1260 blocks = tape->merge_bh_size / tape->blk_size; 968 kfree(tape->buf);
1261 if (tape->merge_bh_size % tape->blk_size) { 969 tape->buf = NULL;
1262 unsigned int i;
1263
1264 blocks++;
1265 i = tape->blk_size - tape->merge_bh_size %
1266 tape->blk_size;
1267 bh = tape->bh->b_reqnext;
1268 while (bh) {
1269 atomic_set(&bh->b_count, 0);
1270 bh = bh->b_reqnext;
1271 }
1272 bh = tape->bh;
1273 while (i) {
1274 if (bh == NULL) {
1275 printk(KERN_INFO "ide-tape: bug,"
1276 " bh NULL\n");
1277 break;
1278 }
1279 min = min(i, (unsigned int)(bh->b_size -
1280 atomic_read(&bh->b_count)));
1281 memset(bh->b_data + atomic_read(&bh->b_count),
1282 0, min);
1283 atomic_add(min, &bh->b_count);
1284 i -= min;
1285 bh = bh->b_reqnext;
1286 }
1287 }
1288 (void) idetape_add_chrdev_write_request(drive, blocks);
1289 tape->merge_bh_size = 0;
1290 }
1291 if (tape->merge_bh != NULL) {
1292 ide_tape_kfree_buffer(tape);
1293 tape->merge_bh = NULL;
1294 } 970 }
1295 tape->chrdev_dir = IDETAPE_DIR_NONE; 971 tape->chrdev_dir = IDETAPE_DIR_NONE;
1296} 972}
1297 973
1298static int idetape_init_read(ide_drive_t *drive) 974static int idetape_init_rw(ide_drive_t *drive, int dir)
1299{ 975{
1300 idetape_tape_t *tape = drive->driver_data; 976 idetape_tape_t *tape = drive->driver_data;
1301 int bytes_read; 977 int rc;
1302 978
1303 /* Initialize read operation */ 979 BUG_ON(dir != IDETAPE_DIR_READ && dir != IDETAPE_DIR_WRITE);
1304 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1305 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
1306 ide_tape_flush_merge_buffer(drive);
1307 idetape_flush_tape_buffers(drive);
1308 }
1309 if (tape->merge_bh || tape->merge_bh_size) {
1310 printk(KERN_ERR "ide-tape: merge_bh_size should be"
1311 " 0 now\n");
1312 tape->merge_bh_size = 0;
1313 }
1314 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
1315 if (!tape->merge_bh)
1316 return -ENOMEM;
1317 tape->chrdev_dir = IDETAPE_DIR_READ;
1318 980
1319 /* 981 if (tape->chrdev_dir == dir)
1320 * Issue a read 0 command to ensure that DSC handshake is 982 return 0;
1321 * switched from completion mode to buffer available mode.
1322 * No point in issuing this if DSC overlap isn't supported, some
1323 * drives (Seagate STT3401A) will return an error.
1324 */
1325 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
1326 bytes_read = idetape_queue_rw_tail(drive,
1327 REQ_IDETAPE_READ, 0,
1328 tape->merge_bh);
1329 if (bytes_read < 0) {
1330 ide_tape_kfree_buffer(tape);
1331 tape->merge_bh = NULL;
1332 tape->chrdev_dir = IDETAPE_DIR_NONE;
1333 return bytes_read;
1334 }
1335 }
1336 }
1337 983
1338 return 0; 984 if (tape->chrdev_dir == IDETAPE_DIR_READ)
1339} 985 ide_tape_discard_merge_buffer(drive, 1);
986 else if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
987 ide_tape_flush_merge_buffer(drive);
988 idetape_flush_tape_buffers(drive);
989 }
1340 990
1341/* called from idetape_chrdev_read() to service a chrdev read request. */ 991 if (tape->buf || tape->valid) {
1342static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) 992 printk(KERN_ERR "ide-tape: valid should be 0 now\n");
1343{ 993 tape->valid = 0;
1344 idetape_tape_t *tape = drive->driver_data; 994 }
1345 995
1346 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); 996 tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
997 if (!tape->buf)
998 return -ENOMEM;
999 tape->chrdev_dir = dir;
1000 tape->cur = tape->buf;
1347 1001
1348 /* If we are at a filemark, return a read length of 0 */ 1002 /*
1349 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) 1003 * Issue a 0 rw command to ensure that DSC handshake is
1350 return 0; 1004 * switched from completion mode to buffer available mode. No
1351 1005 * point in issuing this if DSC overlap isn't supported, some
1352 idetape_init_read(drive); 1006 * drives (Seagate STT3401A) will return an error.
1007 */
1008 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
1009 int cmd = dir == IDETAPE_DIR_READ ? REQ_IDETAPE_READ
1010 : REQ_IDETAPE_WRITE;
1011
1012 rc = idetape_queue_rw_tail(drive, cmd, 0);
1013 if (rc < 0) {
1014 kfree(tape->buf);
1015 tape->buf = NULL;
1016 tape->chrdev_dir = IDETAPE_DIR_NONE;
1017 return rc;
1018 }
1019 }
1353 1020
1354 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks, 1021 return 0;
1355 tape->merge_bh);
1356} 1022}
1357 1023
1358static void idetape_pad_zeros(ide_drive_t *drive, int bcount) 1024static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
1359{ 1025{
1360 idetape_tape_t *tape = drive->driver_data; 1026 idetape_tape_t *tape = drive->driver_data;
1361 struct idetape_bh *bh; 1027
1362 int blocks; 1028 memset(tape->buf, 0, tape->buffer_size);
1363 1029
1364 while (bcount) { 1030 while (bcount) {
1365 unsigned int count; 1031 unsigned int count = min(tape->buffer_size, bcount);
1366 1032
1367 bh = tape->merge_bh; 1033 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, count);
1368 count = min(tape->buffer_size, bcount);
1369 bcount -= count; 1034 bcount -= count;
1370 blocks = count / tape->blk_size;
1371 while (count) {
1372 atomic_set(&bh->b_count,
1373 min(count, (unsigned int)bh->b_size));
1374 memset(bh->b_data, 0, atomic_read(&bh->b_count));
1375 count -= atomic_read(&bh->b_count);
1376 bh = bh->b_reqnext;
1377 }
1378 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
1379 tape->merge_bh);
1380 } 1035 }
1381} 1036}
1382 1037
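
ide_tape_flush_merge_buffer() shows the payoff of the contiguous staging buffer: a partial final block is padded with zeros up to the block size and written out in one request, replacing the old walk over the bh chain. The same padding step in isolation; ROUNDUP computes what the kernel's roundup() does:

#include <string.h>

/* round x up to a multiple of m (m > 0) */
#define ROUNDUP(x, m) ((((x) + (m) - 1) / (m)) * (m))

/* pad the tail of the staging buffer so the final write is a whole
 * number of tape blocks, as ide_tape_flush_merge_buffer() now does */
static size_t pad_to_block(char *buf, size_t valid, size_t blk_size)
{
        size_t aligned = ROUNDUP(valid, blk_size);

        memset(buf + valid, 0, aligned - valid);
        return aligned;         /* number of bytes to hand to the write */
}
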
@@ -1456,7 +1111,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
1456 } 1111 }
1457 1112
1458 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1113 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
1459 tape->merge_bh_size = 0; 1114 tape->valid = 0;
1460 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) 1115 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1461 ++count; 1116 ++count;
1462 ide_tape_discard_merge_buffer(drive, 0); 1117 ide_tape_discard_merge_buffer(drive, 0);
@@ -1505,9 +1160,9 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1505{ 1160{
1506 struct ide_tape_obj *tape = file->private_data; 1161 struct ide_tape_obj *tape = file->private_data;
1507 ide_drive_t *drive = tape->drive; 1162 ide_drive_t *drive = tape->drive;
1508 ssize_t bytes_read, temp, actually_read = 0, rc; 1163 size_t done = 0;
1509 ssize_t ret = 0; 1164 ssize_t ret = 0;
1510 u16 ctl = *(u16 *)&tape->caps[12]; 1165 int rc;
1511 1166
1512 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1167 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1513 1168
@@ -1517,49 +1172,43 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1517 (count % tape->blk_size) == 0) 1172 (count % tape->blk_size) == 0)
1518 tape->user_bs_factor = count / tape->blk_size; 1173 tape->user_bs_factor = count / tape->blk_size;
1519 } 1174 }
1520 rc = idetape_init_read(drive); 1175
1176 rc = idetape_init_rw(drive, IDETAPE_DIR_READ);
1521 if (rc < 0) 1177 if (rc < 0)
1522 return rc; 1178 return rc;
1523 if (count == 0) 1179
1524 return (0); 1180 while (done < count) {
1525 if (tape->merge_bh_size) { 1181 size_t todo;
1526 actually_read = min((unsigned int)(tape->merge_bh_size), 1182
1527 (unsigned int)count); 1183 /* refill if staging buffer is empty */
1528 if (idetape_copy_stage_to_user(tape, buf, actually_read)) 1184 if (!tape->valid) {
1529 ret = -EFAULT; 1185 /* If we are at a filemark, nothing more to read */
1530 buf += actually_read; 1186 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1531 tape->merge_bh_size -= actually_read; 1187 break;
1532 count -= actually_read; 1188 /* read */
1533 } 1189 if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
1534 while (count >= tape->buffer_size) { 1190 tape->buffer_size) <= 0)
1535 bytes_read = idetape_add_chrdev_read_request(drive, ctl); 1191 break;
1536 if (bytes_read <= 0) 1192 }
1537 goto finish; 1193
1538 if (idetape_copy_stage_to_user(tape, buf, bytes_read)) 1194 /* copy out */
1539 ret = -EFAULT; 1195 todo = min_t(size_t, count - done, tape->valid);
1540 buf += bytes_read; 1196 if (copy_to_user(buf + done, tape->cur, todo))
1541 count -= bytes_read;
1542 actually_read += bytes_read;
1543 }
1544 if (count) {
1545 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
1546 if (bytes_read <= 0)
1547 goto finish;
1548 temp = min((unsigned long)count, (unsigned long)bytes_read);
1549 if (idetape_copy_stage_to_user(tape, buf, temp))
1550 ret = -EFAULT; 1197 ret = -EFAULT;
1551 actually_read += temp; 1198
1552 tape->merge_bh_size = bytes_read-temp; 1199 tape->cur += todo;
1200 tape->valid -= todo;
1201 done += todo;
1553 } 1202 }
1554finish: 1203
1555 if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) { 1204 if (!done && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
1556 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); 1205 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
1557 1206
1558 idetape_space_over_filemarks(drive, MTFSF, 1); 1207 idetape_space_over_filemarks(drive, MTFSF, 1);
1559 return 0; 1208 return 0;
1560 } 1209 }
1561 1210
1562 return ret ? ret : actually_read; 1211 return ret ? ret : done;
1563} 1212}
1564 1213
1565static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf, 1214static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
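
The rewritten idetape_chrdev_read() above reduces to a textbook staging-buffer consumer: refill buf when valid reaches zero, copy out min(remaining, valid), advance cur. A runnable userspace model of the same loop, with fill_staging() standing in for the REQ_IDETAPE_READ round trip and memcpy() for copy_to_user():

#include <stdio.h>
#include <string.h>

#define BUF_SZ 16

static char buf[BUF_SZ];        /* staging buffer (tape->buf) */
static char *cur;               /* read cursor (tape->cur) */
static size_t valid;            /* valid bytes left (tape->valid) */

static size_t fill_staging(void)        /* pretend the device returned data */
{
        memset(buf, 'x', BUF_SZ);
        cur = buf;
        return BUF_SZ;
}

static size_t staged_read(char *out, size_t count)
{
        size_t done = 0;

        while (done < count) {
                if (!valid && !(valid = fill_staging()))
                        break;                  /* nothing more to read */

                size_t todo = count - done < valid ? count - done : valid;
                memcpy(out + done, cur, todo);  /* copy_to_user() in kernel */
                cur += todo;
                valid -= todo;
                done += todo;
        }
        return done;
}

int main(void)
{
        char out[40];
        printf("read %zu bytes\n", staged_read(out, sizeof(out)));
        return 0;
}

The write side (next hunk) is the mirror image: copy in until the staging buffer is full, then flush a whole buffer_size write to the tape.
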
@@ -1567,9 +1216,9 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
1567{ 1216{
1568 struct ide_tape_obj *tape = file->private_data; 1217 struct ide_tape_obj *tape = file->private_data;
1569 ide_drive_t *drive = tape->drive; 1218 ide_drive_t *drive = tape->drive;
1570 ssize_t actually_written = 0; 1219 size_t done = 0;
1571 ssize_t ret = 0; 1220 ssize_t ret = 0;
1572 u16 ctl = *(u16 *)&tape->caps[12]; 1221 int rc;
1573 1222
1574 /* The drive is write protected. */ 1223 /* The drive is write protected. */
1575 if (tape->write_prot) 1224 if (tape->write_prot)
@@ -1578,80 +1227,31 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
1578 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1227 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1579 1228
1580 /* Initialize write operation */ 1229 /* Initialize write operation */
1581 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 1230 rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE);
1582 if (tape->chrdev_dir == IDETAPE_DIR_READ) 1231 if (rc < 0)
1583 ide_tape_discard_merge_buffer(drive, 1); 1232 return rc;
1584 if (tape->merge_bh || tape->merge_bh_size) {
1585 printk(KERN_ERR "ide-tape: merge_bh_size "
1586 "should be 0 now\n");
1587 tape->merge_bh_size = 0;
1588 }
1589 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
1590 if (!tape->merge_bh)
1591 return -ENOMEM;
1592 tape->chrdev_dir = IDETAPE_DIR_WRITE;
1593 idetape_init_merge_buffer(tape);
1594 1233
1595 /* 1234 while (done < count) {
1596 * Issue a write 0 command to ensure that DSC handshake is 1235 size_t todo;
1597 * switched from completion mode to buffer available mode. No 1236
1598 * point in issuing this if DSC overlap isn't supported, some 1237 /* flush if staging buffer is full */
1599 * drives (Seagate STT3401A) will return an error. 1238 if (tape->valid == tape->buffer_size &&
1600 */ 1239 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
1601 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) { 1240 tape->buffer_size) <= 0)
1602 ssize_t retval = idetape_queue_rw_tail(drive, 1241 return rc;
1603 REQ_IDETAPE_WRITE, 0, 1242
1604 tape->merge_bh); 1243 /* copy in */
1605 if (retval < 0) { 1244 todo = min_t(size_t, count - done,
1606 ide_tape_kfree_buffer(tape); 1245 tape->buffer_size - tape->valid);
1607 tape->merge_bh = NULL; 1246 if (copy_from_user(tape->cur, buf + done, todo))
1608 tape->chrdev_dir = IDETAPE_DIR_NONE;
1609 return retval;
1610 }
1611 }
1612 }
1613 if (count == 0)
1614 return (0);
1615 if (tape->merge_bh_size) {
1616 if (tape->merge_bh_size >= tape->buffer_size) {
1617 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
1618 tape->merge_bh_size = 0;
1619 }
1620 actually_written = min((unsigned int)
1621 (tape->buffer_size - tape->merge_bh_size),
1622 (unsigned int)count);
1623 if (idetape_copy_stage_from_user(tape, buf, actually_written))
1624 ret = -EFAULT;
1625 buf += actually_written;
1626 tape->merge_bh_size += actually_written;
1627 count -= actually_written;
1628
1629 if (tape->merge_bh_size == tape->buffer_size) {
1630 ssize_t retval;
1631 tape->merge_bh_size = 0;
1632 retval = idetape_add_chrdev_write_request(drive, ctl);
1633 if (retval <= 0)
1634 return (retval);
1635 }
1636 }
1637 while (count >= tape->buffer_size) {
1638 ssize_t retval;
1639 if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
1640 ret = -EFAULT;
1641 buf += tape->buffer_size;
1642 count -= tape->buffer_size;
1643 retval = idetape_add_chrdev_write_request(drive, ctl);
1644 actually_written += tape->buffer_size;
1645 if (retval <= 0)
1646 return (retval);
1647 }
1648 if (count) {
1649 actually_written += count;
1650 if (idetape_copy_stage_from_user(tape, buf, count))
1651 ret = -EFAULT; 1247 ret = -EFAULT;
1652 tape->merge_bh_size += count; 1248
1249 tape->cur += todo;
1250 tape->valid += todo;
1251 done += todo;
1653 } 1252 }
1654 return ret ? ret : actually_written; 1253
1254 return ret ? ret : done;
1655} 1255}
1656 1256
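The write path is the mirror image: fill the staging buffer from user space and flush it only when full, leaving any trailing partial block staged for a later write or for the release path. A minimal sketch under the same hypothetical names as the read sketch above; stage_flush() stands in for the REQ_IDETAPE_WRITE idetape_queue_rw_tail() call.

static ssize_t stage_flush(struct stage *st);	/* hypothetical: write out, reset cur/valid */

static ssize_t staged_write(struct stage *st, const char __user *ubuf,
			    size_t count)
{
	size_t done = 0;

	while (done < count) {
		size_t todo;

		/* flush only once the stage is completely full */
		if (st->valid == st->size && stage_flush(st) <= 0)
			return done ? done : -EIO;

		todo = min_t(size_t, count - done, st->size - st->valid);
		if (copy_from_user(st->cur, ubuf + done, todo))
			return done ? done : -EFAULT;

		st->cur += todo;
		st->valid += todo;
		done += todo;
	}
	return done;	/* a trailing partial block stays staged */
}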
1657static int idetape_write_filemark(ide_drive_t *drive) 1257static int idetape_write_filemark(ide_drive_t *drive)
@@ -1812,7 +1412,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
1812 idetape_flush_tape_buffers(drive); 1412 idetape_flush_tape_buffers(drive);
1813 } 1413 }
1814 if (cmd == MTIOCGET || cmd == MTIOCPOS) { 1414 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
1815 block_offset = tape->merge_bh_size / 1415 block_offset = tape->valid /
1816 (tape->blk_size * tape->user_bs_factor); 1416 (tape->blk_size * tape->user_bs_factor);
1817 position = idetape_read_position(drive); 1417 position = idetape_read_position(drive);
1818 if (position < 0) 1418 if (position < 0)
@@ -1960,12 +1560,12 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
1960 idetape_tape_t *tape = drive->driver_data; 1560 idetape_tape_t *tape = drive->driver_data;
1961 1561
1962 ide_tape_flush_merge_buffer(drive); 1562 ide_tape_flush_merge_buffer(drive);
1963 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0); 1563 tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
1964 if (tape->merge_bh != NULL) { 1564 if (tape->buf != NULL) {
1965 idetape_pad_zeros(drive, tape->blk_size * 1565 idetape_pad_zeros(drive, tape->blk_size *
1966 (tape->user_bs_factor - 1)); 1566 (tape->user_bs_factor - 1));
1967 ide_tape_kfree_buffer(tape); 1567 kfree(tape->buf);
1968 tape->merge_bh = NULL; 1568 tape->buf = NULL;
1969 } 1569 }
1970 idetape_write_filemark(drive); 1570 idetape_write_filemark(drive);
1971 idetape_flush_tape_buffers(drive); 1571 idetape_flush_tape_buffers(drive);
@@ -2159,8 +1759,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2159 u16 *ctl = (u16 *)&tape->caps[12]; 1759 u16 *ctl = (u16 *)&tape->caps[12];
2160 1760
2161 drive->pc_callback = ide_tape_callback; 1761 drive->pc_callback = ide_tape_callback;
2162 drive->pc_update_buffers = idetape_update_buffers;
2163 drive->pc_io_buffers = ide_tape_io_buffers;
2164 1762
2165 drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP; 1763 drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
2166 1764
@@ -2191,11 +1789,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2191 tape->buffer_size = *ctl * tape->blk_size; 1789 tape->buffer_size = *ctl * tape->blk_size;
2192 } 1790 }
2193 buffer_size = tape->buffer_size; 1791 buffer_size = tape->buffer_size;
2194 tape->pages_per_buffer = buffer_size / PAGE_SIZE;
2195 if (buffer_size % PAGE_SIZE) {
2196 tape->pages_per_buffer++;
2197 tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
2198 }
2199 1792
2200 /* select the "best" DSC read/write polling freq */ 1793 /* select the "best" DSC read/write polling freq */
2201 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]); 1794 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
@@ -2238,7 +1831,7 @@ static void ide_tape_release(struct device *dev)
2238 ide_drive_t *drive = tape->drive; 1831 ide_drive_t *drive = tape->drive;
2239 struct gendisk *g = tape->disk; 1832 struct gendisk *g = tape->disk;
2240 1833
2241 BUG_ON(tape->merge_bh_size); 1834 BUG_ON(tape->valid);
2242 1835
2243 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP; 1836 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
2244 drive->driver_data = NULL; 1837 drive->driver_data = NULL;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4aa6223c11be..a0c3e1b2f73c 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -385,7 +385,7 @@ out_end:
385 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) 385 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
386 ide_finish_cmd(drive, cmd, stat); 386 ide_finish_cmd(drive, cmd, stat);
387 else 387 else
388 ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9); 388 ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
389 return ide_stopped; 389 return ide_stopped;
390out_err: 390out_err:
391 ide_error_cmd(drive, cmd); 391 ide_error_cmd(drive, cmd);
@@ -424,7 +424,9 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
424 424
425 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 425 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
426 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 426 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
427 rq->buffer = buf; 427
428 if (cmd->tf_flags & IDE_TFLAG_WRITE)
429 rq->cmd_flags |= REQ_RW;
428 430
429 /* 431 /*
430 * (ks) We transfer currently only whole sectors. 432 * (ks) We transfer currently only whole sectors.
@@ -432,18 +434,20 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
432 * if we would find a solution to transfer any size. 434 * if we would find a solution to transfer any size.
433 * To support special commands like READ LONG. 435 * To support special commands like READ LONG.
434 */ 436 */
435 rq->hard_nr_sectors = rq->nr_sectors = nsect; 437 if (nsect) {
436 rq->hard_cur_sectors = rq->current_nr_sectors = nsect; 438 error = blk_rq_map_kern(drive->queue, rq, buf,
437 439 nsect * SECTOR_SIZE, __GFP_WAIT);
438 if (cmd->tf_flags & IDE_TFLAG_WRITE) 440 if (error)
439 rq->cmd_flags |= REQ_RW; 441 goto put_req;
442 }
440 443
441 rq->special = cmd; 444 rq->special = cmd;
442 cmd->rq = rq; 445 cmd->rq = rq;
443 446
444 error = blk_execute_rq(drive->queue, NULL, rq, 0); 447 error = blk_execute_rq(drive->queue, NULL, rq, 0);
445 blk_put_request(rq);
446 448
449put_req:
450 blk_put_request(rq);
447 return error; 451 return error;
448} 452}
449 453
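The ide-taskfile hunk swaps the old habit of pointing rq->buffer at a kernel buffer and hand-filling the sector counts for blk_rq_map_kern(), which builds the bio and sizes the request itself, and it routes failures through a single put_req exit so the request is always freed. A condensed sketch of the pattern, with generic names and error handling trimmed:

#include <linux/blkdev.h>

static int issue_kern_cmd(struct request_queue *q, void *buf,
			  unsigned int len, int write)
{
	struct request *rq;
	int error;

	rq = blk_get_request(q, write ? WRITE : READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_SPECIAL;

	if (len) {
		/* builds the bio over buf and sets the request size */
		error = blk_rq_map_kern(q, rq, buf, len, __GFP_WAIT);
		if (error)
			goto put_req;
	}

	error = blk_execute_rq(q, NULL, rq, 0);	/* waits for completion */
put_req:
	blk_put_request(rq);
	return error;
}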
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 248a54bd2386..c2a16a8f486d 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -177,7 +177,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
177 u8 clock = inb(high_16 + 0x11); 177 u8 clock = inb(high_16 + 0x11);
178 178
179 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11); 179 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
180 word_count = (rq->nr_sectors << 8); 180 word_count = (blk_rq_sectors(rq) << 8);
181 word_count = (rq_data_dir(rq) == READ) ? 181 word_count = (rq_data_dir(rq) == READ) ?
182 word_count | 0x05000000 : 182 word_count | 0x05000000 :
183 word_count | 0x06000000; 183 word_count | 0x06000000;
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index b4cf42dc8a6f..05a93d6baecc 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
112 ide_hwif_t *hwif = drive->hwif; 112 ide_hwif_t *hwif = drive->hwif;
113 unsigned long sc_base = hwif->config_data; 113 unsigned long sc_base = hwif->config_data;
114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); 114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
115 unsigned long nsectors = hwif->rq->nr_sectors; 115 unsigned long nsectors = blk_rq_sectors(hwif->rq);
116 116
117 /* 117 /*
118 * We have to manually load the sector count and size into 118 * We have to manually load the sector count and size into
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 564422d23976..5ca76224f6d1 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ? 307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1); 308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
309 309
310 tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt); 310 tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
311 311
312 return 0; 312 return 0;
313} 313}
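The pdc202xx_old, tc86c001 and tx4939ide hunks are all instances of one mechanical substitution. For reference, the mapping between the removed struct request fields and the accessor helpers used throughout this series (not an exhaustive list):

/*
 * Removed field                 Accessor / replacement
 * rq->sector                    blk_rq_pos(rq)         sector_t, start sector
 * rq->nr_sectors                blk_rq_sectors(rq)     unsigned int, whole rq
 * rq->hard_nr_sectors           blk_rq_sectors(rq)     hard/soft split is gone
 * rq->current_nr_sectors        blk_rq_cur_sectors(rq) current segment only
 * rq->current_nr_sectors << 9   blk_rq_cur_bytes(rq)   current segment, bytes
 * rq->data_len                  blk_rq_bytes(rq)       whole rq, in bytes
 * rq->data_len (residual use)   rq->resid_len          bytes not transferred
 */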
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index de143deb06f0..c0bebc6a2f2c 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -672,15 +672,14 @@ try_again:
672 msb->req_sg); 672 msb->req_sg);
673 673
674 if (!msb->seg_count) { 674 if (!msb->seg_count) {
675 chunk = __blk_end_request(msb->block_req, -ENOMEM, 675 chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
676 blk_rq_cur_bytes(msb->block_req));
677 continue; 676 continue;
678 } 677 }
679 678
680 t_sec = msb->block_req->sector << 9; 679 t_sec = blk_rq_pos(msb->block_req) << 9;
681 sector_div(t_sec, msb->page_size); 680 sector_div(t_sec, msb->page_size);
682 681
683 count = msb->block_req->nr_sectors << 9; 682 count = blk_rq_bytes(msb->block_req);
684 count /= msb->page_size; 683 count /= msb->page_size;
685 684
686 param.system = msb->system; 685 param.system = msb->system;
@@ -705,8 +704,8 @@ try_again:
705 return 0; 704 return 0;
706 } 705 }
707 706
708 dev_dbg(&card->dev, "elv_next\n"); 707 dev_dbg(&card->dev, "blk_fetch\n");
709 msb->block_req = elv_next_request(msb->queue); 708 msb->block_req = blk_fetch_request(msb->queue);
710 if (!msb->block_req) { 709 if (!msb->block_req) {
711 dev_dbg(&card->dev, "issue end\n"); 710 dev_dbg(&card->dev, "issue end\n");
712 return -EAGAIN; 711 return -EAGAIN;
@@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
745 t_len *= msb->page_size; 744 t_len *= msb->page_size;
746 } 745 }
747 } else 746 } else
748 t_len = msb->block_req->nr_sectors << 9; 747 t_len = blk_rq_bytes(msb->block_req);
749 748
750 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); 749 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
751 750
@@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
825 return; 824 return;
826 825
827 if (msb->eject) { 826 if (msb->eject) {
828 while ((req = elv_next_request(q)) != NULL) 827 while ((req = blk_fetch_request(q)) != NULL)
829 __blk_end_request(req, -ENODEV, blk_rq_bytes(req)); 828 __blk_end_request_all(req, -ENODEV);
830 829
831 return; 830 return;
832 } 831 }
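mspro_block's eject path shows the standard drain idiom after the conversion: blk_fetch_request() dequeues in the same step as the lookup, and __blk_end_request_all() fails the whole request in one call, replacing the old elv_next_request() plus __blk_end_request() pair. Reduced to a helper (queue_lock held, as it is inside a request_fn):

#include <linux/blkdev.h>

static void fail_all_requests(struct request_queue *q, int error)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(req, error);
}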
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b97..79f5433359f9 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1277,8 +1277,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1277 /* do we need to support multiple segments? */ 1277 /* do we need to support multiple segments? */
1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
1280 ioc->name, __func__, req->bio->bi_vcnt, req->data_len, 1280 ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1281 rsp->bio->bi_vcnt, rsp->data_len); 1281 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1282 return -EINVAL; 1282 return -EINVAL;
1283 } 1283 }
1284 1284
@@ -1295,7 +1295,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1295 smpreq = (SmpPassthroughRequest_t *)mf; 1295 smpreq = (SmpPassthroughRequest_t *)mf;
1296 memset(smpreq, 0, sizeof(*smpreq)); 1296 memset(smpreq, 0, sizeof(*smpreq));
1297 1297
1298 smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4); 1298 smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; 1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
1300 1300
1301 if (rphy) 1301 if (rphy)
@@ -1321,10 +1321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1321 MPI_SGE_FLAGS_END_OF_BUFFER | 1321 MPI_SGE_FLAGS_END_OF_BUFFER |
1322 MPI_SGE_FLAGS_DIRECTION | 1322 MPI_SGE_FLAGS_DIRECTION |
1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
1324 flagsLength |= (req->data_len - 4); 1324 flagsLength |= (blk_rq_bytes(req) - 4);
1325 1325
1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
1327 req->data_len, PCI_DMA_BIDIRECTIONAL); 1327 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1328 if (!dma_addr_out) 1328 if (!dma_addr_out)
1329 goto put_mf; 1329 goto put_mf;
1330 mpt_add_sge(psge, flagsLength, dma_addr_out); 1330 mpt_add_sge(psge, flagsLength, dma_addr_out);
@@ -1332,9 +1332,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1332 1332
1333 /* response */ 1333 /* response */
1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
1335 flagsLength |= rsp->data_len + 4; 1335 flagsLength |= blk_rq_bytes(rsp) + 4;
1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
1337 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1337 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1338 if (!dma_addr_in) 1338 if (!dma_addr_in)
1339 goto unmap; 1339 goto unmap;
1340 mpt_add_sge(psge, flagsLength, dma_addr_in); 1340 mpt_add_sge(psge, flagsLength, dma_addr_in);
@@ -1357,8 +1357,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
1358 memcpy(req->sense, smprep, sizeof(*smprep)); 1358 memcpy(req->sense, smprep, sizeof(*smprep));
1359 req->sense_len = sizeof(*smprep); 1359 req->sense_len = sizeof(*smprep);
1360 req->data_len = 0; 1360 req->resid_len = 0;
1361 rsp->data_len -= smprep->ResponseDataLength; 1361 rsp->resid_len -= smprep->ResponseDataLength;
1362 } else { 1362 } else {
1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
1364 ioc->name, __func__); 1364 ioc->name, __func__);
@@ -1366,10 +1366,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1366 } 1366 }
1367unmap: 1367unmap:
1368 if (dma_addr_out) 1368 if (dma_addr_out)
1369 pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len, 1369 pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
1370 PCI_DMA_BIDIRECTIONAL); 1370 PCI_DMA_BIDIRECTIONAL);
1371 if (dma_addr_in) 1371 if (dma_addr_in)
1372 pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len, 1372 pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
1373 PCI_DMA_BIDIRECTIONAL); 1373 PCI_DMA_BIDIRECTIONAL);
1374put_mf: 1374put_mf:
1375 if (mf) 1375 if (mf)
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc41..6573ef4408f1 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
426 struct request_queue *q = req->q; 426 struct request_queue *q = req->q;
427 unsigned long flags; 427 unsigned long flags;
428 428
429 if (blk_end_request(req, error, nr_bytes)) { 429 if (blk_end_request(req, error, nr_bytes))
430 int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
431
432 if (blk_pc_request(req))
433 leftover = req->data_len;
434
435 if (error) 430 if (error)
436 blk_end_request(req, -EIO, leftover); 431 blk_end_request_all(req, -EIO);
437 }
438 432
439 spin_lock_irqsave(q->queue_lock, flags); 433 spin_lock_irqsave(q->queue_lock, flags);
440 434
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
761 break; 755 break;
762 756
763 case CACHE_SMARTFETCH: 757 case CACHE_SMARTFETCH:
764 if (req->nr_sectors > 16) 758 if (blk_rq_sectors(req) > 16)
765 ctl_flags = 0x201F0008; 759 ctl_flags = 0x201F0008;
766 else 760 else
767 ctl_flags = 0x001F0000; 761 ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
781 ctl_flags = 0x001F0010; 775 ctl_flags = 0x001F0010;
782 break; 776 break;
783 case CACHE_SMARTBACK: 777 case CACHE_SMARTBACK:
784 if (req->nr_sectors > 16) 778 if (blk_rq_sectors(req) > 16)
785 ctl_flags = 0x001F0004; 779 ctl_flags = 0x001F0004;
786 else 780 else
787 ctl_flags = 0x001F0010; 781 ctl_flags = 0x001F0010;
788 break; 782 break;
789 case CACHE_SMARTTHROUGH: 783 case CACHE_SMARTTHROUGH:
790 if (req->nr_sectors > 16) 784 if (blk_rq_sectors(req) > 16)
791 ctl_flags = 0x001F0004; 785 ctl_flags = 0x001F0004;
792 else 786 else
793 ctl_flags = 0x001F0010; 787 ctl_flags = 0x001F0010;
@@ -827,22 +821,22 @@ static int i2o_block_transfer(struct request *req)
827 821
828 *mptr++ = cpu_to_le32(scsi_flags); 822 *mptr++ = cpu_to_le32(scsi_flags);
829 823
830 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 824 *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
831 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 825 *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
832 826
833 memcpy(mptr, cmd, 10); 827 memcpy(mptr, cmd, 10);
834 mptr += 4; 828 mptr += 4;
835 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 829 *mptr++ = cpu_to_le32(blk_rq_bytes(req));
836 } else 830 } else
837#endif 831#endif
838 { 832 {
839 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); 833 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
840 *mptr++ = cpu_to_le32(ctl_flags); 834 *mptr++ = cpu_to_le32(ctl_flags);
841 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 835 *mptr++ = cpu_to_le32(blk_rq_bytes(req));
842 *mptr++ = 836 *mptr++ =
843 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); 837 cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
844 *mptr++ = 838 *mptr++ =
845 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); 839 cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
846 } 840 }
847 841
848 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 842 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +877,7 @@ static void i2o_block_request_fn(struct request_queue *q)
883 struct request *req; 877 struct request *req;
884 878
885 while (!blk_queue_plugged(q)) { 879 while (!blk_queue_plugged(q)) {
886 req = elv_next_request(q); 880 req = blk_peek_request(q);
887 if (!req) 881 if (!req)
888 break; 882 break;
889 883
@@ -896,7 +890,7 @@ static void i2o_block_request_fn(struct request_queue *q)
896 890
897 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { 891 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
898 if (!i2o_block_transfer(req)) { 892 if (!i2o_block_transfer(req)) {
899 blkdev_dequeue_request(req); 893 blk_start_request(req);
900 continue; 894 continue;
901 } else 895 } else
902 osm_info("transfer error\n"); 896 osm_info("transfer error\n");
@@ -922,8 +916,10 @@ static void i2o_block_request_fn(struct request_queue *q)
922 blk_stop_queue(q); 916 blk_stop_queue(q);
923 break; 917 break;
924 } 918 }
925 } else 919 } else {
926 end_request(req, 0); 920 blk_start_request(req);
921 __blk_end_request_all(req, -EIO);
922 }
927 } 923 }
928}; 924};
929 925
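i2o_block keeps the two-step peek/start form because it may leave a request on the queue when the controller is busy. The skeleton of that pattern, with can_issue() and issue() as hypothetical driver hooks:

#include <linux/blkdev.h>

static int can_issue(struct request *req);	/* hypothetical resource check */
static int issue(struct request *req);		/* hypothetical, 0 on success */

/*
 * Peeking leaves the request queued, so a busy driver can simply stop
 * the queue and retry later; blk_start_request() dequeues and starts
 * timeout handling only once the driver commits to the request.
 */
static void sketch_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (!can_issue(req)) {
			blk_stop_queue(q);	/* restart when resources free up */
			break;
		}
		blk_start_request(req);		/* dequeued; now owned by us */
		if (issue(req))
			__blk_end_request_all(req, -EIO);
	}
}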
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b25e9b6516ae..c5df86546458 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -243,7 +243,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
243 brq.mrq.cmd = &brq.cmd; 243 brq.mrq.cmd = &brq.cmd;
244 brq.mrq.data = &brq.data; 244 brq.mrq.data = &brq.data;
245 245
246 brq.cmd.arg = req->sector; 246 brq.cmd.arg = blk_rq_pos(req);
247 if (!mmc_card_blockaddr(card)) 247 if (!mmc_card_blockaddr(card))
248 brq.cmd.arg <<= 9; 248 brq.cmd.arg <<= 9;
249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
251 brq.stop.opcode = MMC_STOP_TRANSMISSION; 251 brq.stop.opcode = MMC_STOP_TRANSMISSION;
252 brq.stop.arg = 0; 252 brq.stop.arg = 0;
253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
254 brq.data.blocks = req->nr_sectors; 254 brq.data.blocks = blk_rq_sectors(req);
255 255
256 /* 256 /*
257 * The block layer doesn't support all sector count 257 * The block layer doesn't support all sector count
@@ -301,7 +301,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
301 * Adjust the sg list so it is the same size as the 301 * Adjust the sg list so it is the same size as the
302 * request. 302 * request.
303 */ 303 */
304 if (brq.data.blocks != req->nr_sectors) { 304 if (brq.data.blocks != blk_rq_sectors(req)) {
305 int i, data_size = brq.data.blocks << 9; 305 int i, data_size = brq.data.blocks << 9;
306 struct scatterlist *sg; 306 struct scatterlist *sg;
307 307
@@ -352,8 +352,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
352 printk(KERN_ERR "%s: error %d transferring data," 352 printk(KERN_ERR "%s: error %d transferring data,"
353 " sector %u, nr %u, card status %#x\n", 353 " sector %u, nr %u, card status %#x\n",
354 req->rq_disk->disk_name, brq.data.error, 354 req->rq_disk->disk_name, brq.data.error,
355 (unsigned)req->sector, 355 (unsigned)blk_rq_pos(req),
356 (unsigned)req->nr_sectors, status); 356 (unsigned)blk_rq_sectors(req), status);
357 } 357 }
358 358
359 if (brq.stop.error) { 359 if (brq.stop.error) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c67..49e582356c65 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
55 spin_lock_irq(q->queue_lock); 55 spin_lock_irq(q->queue_lock);
56 set_current_state(TASK_INTERRUPTIBLE); 56 set_current_state(TASK_INTERRUPTIBLE);
57 if (!blk_queue_plugged(q)) 57 if (!blk_queue_plugged(q))
58 req = elv_next_request(q); 58 req = blk_fetch_request(q);
59 mq->req = req; 59 mq->req = req;
60 spin_unlock_irq(q->queue_lock); 60 spin_unlock_irq(q->queue_lock);
61 61
@@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
88{ 88{
89 struct mmc_queue *mq = q->queuedata; 89 struct mmc_queue *mq = q->queuedata;
90 struct request *req; 90 struct request *req;
91 int ret;
92 91
93 if (!mq) { 92 if (!mq) {
94 printk(KERN_ERR "MMC: killing requests for dead queue\n"); 93 printk(KERN_ERR "MMC: killing requests for dead queue\n");
95 while ((req = elv_next_request(q)) != NULL) { 94 while ((req = blk_fetch_request(q)) != NULL)
96 do { 95 __blk_end_request_all(req, -EIO);
97 ret = __blk_end_request(req, -EIO,
98 blk_rq_cur_bytes(req));
99 } while (ret);
100 }
101 return; 96 return;
102 } 97 }
103 98
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb1..502622f628bc 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
47 unsigned long block, nsect; 47 unsigned long block, nsect;
48 char *buf; 48 char *buf;
49 49
50 block = req->sector << 9 >> tr->blkshift; 50 block = blk_rq_pos(req) << 9 >> tr->blkshift;
51 nsect = req->current_nr_sectors << 9 >> tr->blkshift; 51 nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
52 52
53 buf = req->buffer; 53 buf = req->buffer;
54 54
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD) 56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return !tr->discard(dev, block, nsect); 57 return tr->discard(dev, block, nsect);
58 58
59 if (!blk_fs_request(req)) 59 if (!blk_fs_request(req))
60 return 0; 60 return -EIO;
61 61
62 if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) 62 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
63 return 0; 63 get_capacity(req->rq_disk))
64 return -EIO;
64 65
65 switch(rq_data_dir(req)) { 66 switch(rq_data_dir(req)) {
66 case READ: 67 case READ:
67 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 68 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
68 if (tr->readsect(dev, block, buf)) 69 if (tr->readsect(dev, block, buf))
69 return 0; 70 return -EIO;
70 return 1; 71 return 0;
71 72
72 case WRITE: 73 case WRITE:
73 if (!tr->writesect) 74 if (!tr->writesect)
74 return 0; 75 return -EIO;
75 76
76 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 77 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
77 if (tr->writesect(dev, block, buf)) 78 if (tr->writesect(dev, block, buf))
78 return 0; 79 return -EIO;
79 return 1; 80 return 0;
80 81
81 default: 82 default:
82 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); 83 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
83 return 0; 84 return -EIO;
84 } 85 }
85} 86}
86 87
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
88{ 89{
89 struct mtd_blktrans_ops *tr = arg; 90 struct mtd_blktrans_ops *tr = arg;
90 struct request_queue *rq = tr->blkcore_priv->rq; 91 struct request_queue *rq = tr->blkcore_priv->rq;
92 struct request *req = NULL;
91 93
92 /* we might get involved when memory gets low, so use PF_MEMALLOC */ 94 /* we might get involved when memory gets low, so use PF_MEMALLOC */
93 current->flags |= PF_MEMALLOC; 95 current->flags |= PF_MEMALLOC;
94 96
95 spin_lock_irq(rq->queue_lock); 97 spin_lock_irq(rq->queue_lock);
98
96 while (!kthread_should_stop()) { 99 while (!kthread_should_stop()) {
97 struct request *req;
98 struct mtd_blktrans_dev *dev; 100 struct mtd_blktrans_dev *dev;
99 int res = 0; 101 int res;
100
101 req = elv_next_request(rq);
102 102
103 if (!req) { 103 if (!req && !(req = blk_fetch_request(rq))) {
104 set_current_state(TASK_INTERRUPTIBLE); 104 set_current_state(TASK_INTERRUPTIBLE);
105 spin_unlock_irq(rq->queue_lock); 105 spin_unlock_irq(rq->queue_lock);
106 schedule(); 106 schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
119 119
120 spin_lock_irq(rq->queue_lock); 120 spin_lock_irq(rq->queue_lock);
121 121
122 end_request(req, res); 122 if (!__blk_end_request_cur(req, res))
123 req = NULL;
123 } 124 }
125
126 if (req)
127 __blk_end_request_all(req, -EIO);
128
124 spin_unlock_irq(rq->queue_lock); 129 spin_unlock_irq(rq->queue_lock);
125 130
126 return 0; 131 return 0;
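The mtd thread now completes one segment per iteration: __blk_end_request_cur() finishes only the current chunk and returns true while bytes remain, so the next request is fetched only once the previous one is fully ended. A self-contained sketch of that structure; handle_cur_segment() is hypothetical:

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int handle_cur_segment(struct request *req);	/* hypothetical, 0 or -errno */

static int sketch_thread(void *arg)
{
	struct request_queue *q = arg;
	struct request *req = NULL;

	spin_lock_irq(q->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req && !(req = blk_fetch_request(q))) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(q->queue_lock);
			schedule();		/* wait for new requests */
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_unlock_irq(q->queue_lock);
		res = handle_cur_segment(req);
		spin_lock_irq(q->queue_lock);

		/* finish the current chunk; false means whole rq done */
		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}
	spin_unlock_irq(q->queue_lock);
	return 0;
}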
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d1815272c435..e64f62d5e0fc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
603 if (dasd_profile_level != DASD_PROFILE_ON) 603 if (dasd_profile_level != DASD_PROFILE_ON)
604 return; 604 return;
605 605
606 sectors = req->nr_sectors; 606 sectors = blk_rq_sectors(req);
607 if (!cqr->buildclk || !cqr->startclk || 607 if (!cqr->buildclk || !cqr->startclk ||
608 !cqr->stopclk || !cqr->endclk || 608 !cqr->stopclk || !cqr->endclk ||
609 !sectors) 609 !sectors)
@@ -1614,15 +1614,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
1614} 1614}
1615 1615
1616/* 1616/*
1617 * posts the buffer_cache about a finalized request
1618 */
1619static inline void dasd_end_request(struct request *req, int error)
1620{
1621 if (__blk_end_request(req, error, blk_rq_bytes(req)))
1622 BUG();
1623}
1624
1625/*
1626 * Process finished error recovery ccw. 1617 * Process finished error recovery ccw.
1627 */ 1618 */
1628static inline void __dasd_block_process_erp(struct dasd_block *block, 1619static inline void __dasd_block_process_erp(struct dasd_block *block,
@@ -1665,18 +1656,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1665 if (basedev->state < DASD_STATE_READY) 1656 if (basedev->state < DASD_STATE_READY)
1666 return; 1657 return;
1667 /* Now we try to fetch requests from the request queue */ 1658 /* Now we try to fetch requests from the request queue */
1668 while (!blk_queue_plugged(queue) && 1659 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1669 elv_next_request(queue)) {
1670
1671 req = elv_next_request(queue);
1672
1673 if (basedev->features & DASD_FEATURE_READONLY && 1660 if (basedev->features & DASD_FEATURE_READONLY &&
1674 rq_data_dir(req) == WRITE) { 1661 rq_data_dir(req) == WRITE) {
1675 DBF_DEV_EVENT(DBF_ERR, basedev, 1662 DBF_DEV_EVENT(DBF_ERR, basedev,
1676 "Rejecting write request %p", 1663 "Rejecting write request %p",
1677 req); 1664 req);
1678 blkdev_dequeue_request(req); 1665 blk_start_request(req);
1679 dasd_end_request(req, -EIO); 1666 __blk_end_request_all(req, -EIO);
1680 continue; 1667 continue;
1681 } 1668 }
1682 cqr = basedev->discipline->build_cp(basedev, block, req); 1669 cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1704,8 +1691,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1704 "CCW creation failed (rc=%ld) " 1691 "CCW creation failed (rc=%ld) "
1705 "on request %p", 1692 "on request %p",
1706 PTR_ERR(cqr), req); 1693 PTR_ERR(cqr), req);
1707 blkdev_dequeue_request(req); 1694 blk_start_request(req);
1708 dasd_end_request(req, -EIO); 1695 __blk_end_request_all(req, -EIO);
1709 continue; 1696 continue;
1710 } 1697 }
1711 /* 1698 /*
@@ -1714,7 +1701,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1714 */ 1701 */
1715 cqr->callback_data = (void *) req; 1702 cqr->callback_data = (void *) req;
1716 cqr->status = DASD_CQR_FILLED; 1703 cqr->status = DASD_CQR_FILLED;
1717 blkdev_dequeue_request(req); 1704 blk_start_request(req);
1718 list_add_tail(&cqr->blocklist, &block->ccw_queue); 1705 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1719 dasd_profile_start(block, cqr, req); 1706 dasd_profile_start(block, cqr, req);
1720 } 1707 }
@@ -1731,7 +1718,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1731 status = cqr->block->base->discipline->free_cp(cqr, req); 1718 status = cqr->block->base->discipline->free_cp(cqr, req);
1732 if (status <= 0) 1719 if (status <= 0)
1733 error = status ? status : -EIO; 1720 error = status ? status : -EIO;
1734 dasd_end_request(req, error); 1721 __blk_end_request_all(req, error);
1735} 1722}
1736 1723
1737/* 1724/*
@@ -2038,10 +2025,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2038 return; 2025 return;
2039 2026
2040 spin_lock_irq(&block->request_queue_lock); 2027 spin_lock_irq(&block->request_queue_lock);
2041 while ((req = elv_next_request(block->request_queue))) { 2028 while ((req = blk_fetch_request(block->request_queue)))
2042 blkdev_dequeue_request(req); 2029 __blk_end_request_all(req, -EIO);
2043 dasd_end_request(req, -EIO);
2044 }
2045 spin_unlock_irq(&block->request_queue_lock); 2030 spin_unlock_irq(&block->request_queue_lock);
2046} 2031}
2047 2032
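Both dasd and, further down, tape_block carried a private helper whose sole job was to complete a whole request and BUG() if anything was left over; blk_end_request_all() and __blk_end_request_all() make that "complete everything" contract the API, so the helpers go away. The equivalence, spelled out:

#include <linux/blkdev.h>

/* The removed helper, as dasd and tape_block open-coded it: */
static inline void old_end_request(struct request *req, int error)
{
	if (__blk_end_request(req, error, blk_rq_bytes(req)))
		BUG();	/* whole-request completion must leave nothing */
}

/*
 * New spelling of the same contract:
 *
 *	__blk_end_request_all(req, error);	queue_lock already held
 *	blk_end_request_all(req, error);	takes queue_lock itself
 */

dasd completes under queue_lock and therefore uses the locked __ variant, while tape_block's ccw callback runs unlocked and uses blk_end_request_all(), as the hunks show.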
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index b9a7f7733446..2efaddfae560 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -505,8 +505,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
505 return ERR_PTR(-EINVAL); 505 return ERR_PTR(-EINVAL);
506 blksize = block->bp_block; 506 blksize = block->bp_block;
507 /* Calculate record id of first and last block. */ 507 /* Calculate record id of first and last block. */
508 first_rec = req->sector >> block->s2b_shift; 508 first_rec = blk_rq_pos(req) >> block->s2b_shift;
509 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 509 last_rec =
510 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
510 /* Check struct bio and count the number of blocks for the request. */ 511 /* Check struct bio and count the number of blocks for the request. */
511 count = 0; 512 count = 0;
512 rq_for_each_segment(bv, req, iter) { 513 rq_for_each_segment(bv, req, iter) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb52da033f06..a41c94053e64 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2354,10 +2354,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2354 blksize = block->bp_block; 2354 blksize = block->bp_block;
2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2356 /* Calculate record id of first and last block. */ 2356 /* Calculate record id of first and last block. */
2357 first_rec = first_trk = req->sector >> block->s2b_shift; 2357 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2358 first_offs = sector_div(first_trk, blk_per_trk); 2358 first_offs = sector_div(first_trk, blk_per_trk);
2359 last_rec = last_trk = 2359 last_rec = last_trk =
2360 (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 2360 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2361 last_offs = sector_div(last_trk, blk_per_trk); 2361 last_offs = sector_div(last_trk, blk_per_trk);
2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2363 2363
@@ -2420,7 +2420,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2420 private = (struct dasd_eckd_private *) cqr->block->base->private; 2420 private = (struct dasd_eckd_private *) cqr->block->base->private;
2421 blksize = cqr->block->bp_block; 2421 blksize = cqr->block->bp_block;
2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2423 recid = req->sector >> cqr->block->s2b_shift; 2423 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2424 ccw = cqr->cpaddr; 2424 ccw = cqr->cpaddr;
2425 /* Skip over define extent & locate record. */ 2425 /* Skip over define extent & locate record. */
2426 ccw++; 2426 ccw++;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a3eb6fd14673..8912358daa2f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
270 return ERR_PTR(-EINVAL); 270 return ERR_PTR(-EINVAL);
271 blksize = block->bp_block; 271 blksize = block->bp_block;
272 /* Calculate record id of first and last block. */ 272 /* Calculate record id of first and last block. */
273 first_rec = req->sector >> block->s2b_shift; 273 first_rec = blk_rq_pos(req) >> block->s2b_shift;
274 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 274 last_rec =
275 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
275 /* Check struct bio and count the number of blocks for the request. */ 276 /* Check struct bio and count the number of blocks for the request. */
276 count = 0; 277 count = 0;
277 cidaw = 0; 278 cidaw = 0;
@@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
309 ccw = cqr->cpaddr; 310 ccw = cqr->cpaddr;
310 /* First ccw is define extent. */ 311 /* First ccw is define extent. */
311 define_extent(ccw++, cqr->data, rq_data_dir(req), 312 define_extent(ccw++, cqr->data, rq_data_dir(req),
312 block->bp_block, req->sector, req->nr_sectors); 313 block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
313 /* Build locate_record + read/write ccws. */ 314 /* Build locate_record + read/write ccws. */
314 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 315 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
315 LO_data = (struct LO_fba_data *) (idaws + cidaw); 316 LO_data = (struct LO_fba_data *) (idaws + cidaw);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5f8e8ef43dd3..2d00a383a475 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1134 /* Setup ccws. */ 1134 /* Setup ccws. */
1135 request->op = TO_BLOCK; 1135 request->op = TO_BLOCK;
1136 start_block = (struct tape_34xx_block_id *) request->cpdata; 1136 start_block = (struct tape_34xx_block_id *) request->cpdata;
1137 start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B; 1137 start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
1138 DBF_EVENT(6, "start_block = %i\n", start_block->block); 1138 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1139 1139
1140 ccw = request->cpaddr; 1140 ccw = request->cpaddr;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 823b05bd0dd7..c453b2f3e9f4 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
633 struct req_iterator iter; 633 struct req_iterator iter;
634 634
635 DBF_EVENT(6, "xBREDid:"); 635 DBF_EVENT(6, "xBREDid:");
636 start_block = req->sector >> TAPEBLOCK_HSEC_S2B; 636 start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
637 DBF_EVENT(6, "start_block = %i\n", start_block); 637 DBF_EVENT(6, "start_block = %i\n", start_block);
638 638
639 rq_for_each_segment(bv, req, iter) 639 rq_for_each_segment(bv, req, iter)
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f32e89e7c4f2..1e7967675980 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,13 +74,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static void 76static void
77tapeblock_end_request(struct request *req, int error)
78{
79 if (blk_end_request(req, error, blk_rq_bytes(req)))
80 BUG();
81}
82
83static void
84__tapeblock_end_request(struct tape_request *ccw_req, void *data) 77__tapeblock_end_request(struct tape_request *ccw_req, void *data)
85{ 78{
86 struct tape_device *device; 79 struct tape_device *device;
@@ -90,17 +83,17 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
90 83
91 device = ccw_req->device; 84 device = ccw_req->device;
92 req = (struct request *) data; 85 req = (struct request *) data;
93 tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO); 86 blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
94 if (ccw_req->rc == 0) 87 if (ccw_req->rc == 0)
95 /* Update position. */ 88 /* Update position. */
96 device->blk_data.block_position = 89 device->blk_data.block_position =
97 (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B; 90 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
98 else 91 else
99 /* We lost the position information due to an error. */ 92 /* We lost the position information due to an error. */
100 device->blk_data.block_position = -1; 93 device->blk_data.block_position = -1;
101 device->discipline->free_bread(ccw_req); 94 device->discipline->free_bread(ccw_req);
102 if (!list_empty(&device->req_queue) || 95 if (!list_empty(&device->req_queue) ||
103 elv_next_request(device->blk_data.request_queue)) 96 blk_peek_request(device->blk_data.request_queue))
104 tapeblock_trigger_requeue(device); 97 tapeblock_trigger_requeue(device);
105} 98}
106 99
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
118 ccw_req = device->discipline->bread(device, req); 111 ccw_req = device->discipline->bread(device, req);
119 if (IS_ERR(ccw_req)) { 112 if (IS_ERR(ccw_req)) {
120 DBF_EVENT(1, "TBLOCK: bread failed\n"); 113 DBF_EVENT(1, "TBLOCK: bread failed\n");
121 tapeblock_end_request(req, -EIO); 114 blk_end_request_all(req, -EIO);
122 return PTR_ERR(ccw_req); 115 return PTR_ERR(ccw_req);
123 } 116 }
124 ccw_req->callback = __tapeblock_end_request; 117 ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
131 * Start/enqueueing failed. No retries in 124 * Start/enqueueing failed. No retries in
132 * this case. 125 * this case.
133 */ 126 */
134 tapeblock_end_request(req, -EIO); 127 blk_end_request_all(req, -EIO);
135 device->discipline->free_bread(ccw_req); 128 device->discipline->free_bread(ccw_req);
136 } 129 }
137 130
@@ -169,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) {
169 spin_lock_irq(&device->blk_data.request_queue_lock); 162 spin_lock_irq(&device->blk_data.request_queue_lock);
170 while ( 163 while (
171 !blk_queue_plugged(queue) && 164 !blk_queue_plugged(queue) &&
172 elv_next_request(queue) && 165 (req = blk_fetch_request(queue)) &&
173 nr_queued < TAPEBLOCK_MIN_REQUEUE 166 nr_queued < TAPEBLOCK_MIN_REQUEUE
174 ) { 167 ) {
175 req = elv_next_request(queue);
176 if (rq_data_dir(req) == WRITE) { 168 if (rq_data_dir(req) == WRITE) {
177 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 169 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
178 blkdev_dequeue_request(req);
179 spin_unlock_irq(&device->blk_data.request_queue_lock); 170 spin_unlock_irq(&device->blk_data.request_queue_lock);
180 tapeblock_end_request(req, -EIO); 171 blk_end_request_all(req, -EIO);
181 spin_lock_irq(&device->blk_data.request_queue_lock); 172 spin_lock_irq(&device->blk_data.request_queue_lock);
182 continue; 173 continue;
183 } 174 }
184 blkdev_dequeue_request(req);
185 nr_queued++; 175 nr_queued++;
186 spin_unlock_irq(&device->blk_data.request_queue_lock); 176 spin_unlock_irq(&device->blk_data.request_queue_lock);
187 rc = tapeblock_start_request(device, req); 177 rc = tapeblock_start_request(device, req);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a85ad05e8548..6d4651684688 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -186,31 +186,31 @@ static void jsfd_do_request(struct request_queue *q)
186{ 186{
187 struct request *req; 187 struct request *req;
188 188
189 while ((req = elv_next_request(q)) != NULL) { 189 req = blk_fetch_request(q);
190 while (req) {
190 struct jsfd_part *jdp = req->rq_disk->private_data; 191 struct jsfd_part *jdp = req->rq_disk->private_data;
191 unsigned long offset = req->sector << 9; 192 unsigned long offset = blk_rq_pos(req) << 9;
192 size_t len = req->current_nr_sectors << 9; 193 size_t len = blk_rq_cur_bytes(req);
194 int err = -EIO;
193 195
194 if ((offset + len) > jdp->dsize) { 196 if ((offset + len) > jdp->dsize)
195 end_request(req, 0); 197 goto end;
196 continue;
197 }
198 198
199 if (rq_data_dir(req) != READ) { 199 if (rq_data_dir(req) != READ) {
200 printk(KERN_ERR "jsfd: write\n"); 200 printk(KERN_ERR "jsfd: write\n");
201 end_request(req, 0); 201 goto end;
202 continue;
203 } 202 }
204 203
205 if ((jdp->dbase & 0xff000000) != 0x20000000) { 204 if ((jdp->dbase & 0xff000000) != 0x20000000) {
206 printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase); 205 printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
207 end_request(req, 0); 206 goto end;
208 continue;
209 } 207 }
210 208
211 jsfd_read(req->buffer, jdp->dbase + offset, len); 209 jsfd_read(req->buffer, jdp->dbase + offset, len);
212 210 err = 0;
213 end_request(req, 1); 211 end:
212 if (!__blk_end_request_cur(req, err))
213 req = blk_fetch_request(q);
214 } 214 }
215} 215}
216 216
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b5..c7076ce25e21 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
1825 if (linked_comm && SCpnt->device->queue_depth > 2 1825 if (linked_comm && SCpnt->device->queue_depth > 2
1826 && TLDEV(SCpnt->device->type)) { 1826 && TLDEV(SCpnt->device->type)) {
1827 ha->cp_stat[i] = READY; 1827 ha->cp_stat[i] = READY;
1828 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0); 1828 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
1829 return 0; 1829 return 0;
1830 } 1830 }
1831 1831
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2144 if (!cpp->din) 2144 if (!cpp->din)
2145 input_only = 0; 2145 input_only = 0;
2146 2146
2147 if (SCpnt->request->sector < minsec) 2147 if (blk_rq_pos(SCpnt->request) < minsec)
2148 minsec = SCpnt->request->sector; 2148 minsec = blk_rq_pos(SCpnt->request);
2149 if (SCpnt->request->sector > maxsec) 2149 if (blk_rq_pos(SCpnt->request) > maxsec)
2150 maxsec = SCpnt->request->sector; 2150 maxsec = blk_rq_pos(SCpnt->request);
2151 2151
2152 sl[n] = SCpnt->request->sector; 2152 sl[n] = blk_rq_pos(SCpnt->request);
2153 ioseek += SCpnt->request->nr_sectors; 2153 ioseek += blk_rq_sectors(SCpnt->request);
2154 2154
2155 if (!n) 2155 if (!n)
2156 continue; 2156 continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2190 k = il[n]; 2190 k = il[n];
2191 cpp = &ha->cp[k]; 2191 cpp = &ha->cp[k];
2192 SCpnt = cpp->SCpnt; 2192 SCpnt = cpp->SCpnt;
2193 ll[n] = SCpnt->request->nr_sectors; 2193 ll[n] = blk_rq_sectors(SCpnt->request);
2194 pl[n] = SCpnt->serial_number; 2194 pl[n] = SCpnt->serial_number;
2195 2195
2196 if (!n) 2196 if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2236 cpp = &ha->cp[k]; 2236 cpp = &ha->cp[k];
2237 SCpnt = cpp->SCpnt; 2237 SCpnt = cpp->SCpnt;
2238 scmd_printk(KERN_INFO, SCpnt, 2238 scmd_printk(KERN_INFO, SCpnt,
2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld" 2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2241 (ihdlr ? "ihdlr" : "qcomm"), 2241 (ihdlr ? "ihdlr" : "qcomm"),
2242 SCpnt->serial_number, k, flushcount, 2242 SCpnt->serial_number, k, flushcount,
2243 n_ready, SCpnt->request->sector, 2243 n_ready, blk_rq_pos(SCpnt->request),
2244 SCpnt->request->nr_sectors, cursec, YESNO(s), 2244 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2245 YESNO(r), YESNO(rev), YESNO(input_only), 2245 YESNO(r), YESNO(rev), YESNO(input_only),
2246 YESNO(overlap), cpp->din); 2246 YESNO(overlap), cpp->din);
2247 } 2247 }
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2408 2408
2409 if (linked_comm && SCpnt->device->queue_depth > 2 2409 if (linked_comm && SCpnt->device->queue_depth > 2
2410 && TLDEV(SCpnt->device->type)) 2410 && TLDEV(SCpnt->device->type))
2411 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1); 2411 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
2412 2412
2413 tstatus = status_byte(spp->target_status); 2413 tstatus = status_byte(spp->target_status);
2414 2414
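The format-string edits in eata (and in lpfc below) follow from the accessor types rather than from any behavior change: blk_rq_sectors() returns unsigned int where rq->nr_sectors was unsigned long, and blk_rq_pos() returns sector_t. A small illustration:

#include <linux/blkdev.h>

static void log_request(struct request *rq)
{
	/* %ld/%lu would now be wrong for the sector count; sector_t
	 * is printed via a cast, since its width is config-dependent */
	printk(KERN_INFO "ns %u sec %llu\n",
	       blk_rq_sectors(rq),
	       (unsigned long long)blk_rq_pos(rq));
}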
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e436788..54fa1e42dc4d 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1927 /* do we need to support multiple segments? */ 1927 /* do we need to support multiple segments? */
1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1930 __func__, req->bio->bi_vcnt, req->data_len, 1930 __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1931 rsp->bio->bi_vcnt, rsp->data_len); 1931 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1932 return -EINVAL; 1932 return -EINVAL;
1933 } 1933 }
1934 1934
1935 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1935 ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
1936 bio_data(rsp->bio), rsp->data_len); 1936 bio_data(rsp->bio), blk_rq_bytes(rsp));
1937 if (ret > 0) { 1937 if (ret > 0) {
1938 /* positive number is the untransferred residual */ 1938 /* positive number is the untransferred residual */
1939 rsp->data_len = ret; 1939 rsp->resid_len = ret;
1940 req->data_len = 0; 1940 req->resid_len = 0;
1941 ret = 0; 1941 ret = 0;
1942 } else if (ret == 0) { 1942 } else if (ret == 0) {
1943 rsp->data_len = 0; 1943 rsp->resid_len = 0;
1944 req->data_len = 0; 1944 req->resid_len = 0;
1945 } 1945 }
1946 1946
1947 return ret; 1947 return ret;
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48a..1bc3b7567994 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
134{ 134{
135 u8 *req_data = NULL, *resp_data = NULL, *buf; 135 u8 *req_data = NULL, *resp_data = NULL, *buf;
136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
137 int error = -EINVAL, resp_data_len = rsp->data_len; 137 int error = -EINVAL;
138 138
139 /* eight is the minimum size for request and response frames */ 139 /* eight is the minimum size for request and response frames */
140 if (req->data_len < 8 || rsp->data_len < 8) 140 if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
141 goto out; 141 goto out;
142 142
143 if (bio_offset(req->bio) + req->data_len > PAGE_SIZE || 143 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
144 bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) { 144 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
145 shost_printk(KERN_ERR, shost, 145 shost_printk(KERN_ERR, shost,
146 "SMP request/response frame crosses page boundary"); 146 "SMP request/response frame crosses page boundary");
147 goto out; 147 goto out;
148 } 148 }
149 149
150 req_data = kzalloc(req->data_len, GFP_KERNEL); 150 req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
151 151
152 /* make sure frame can always be built ... we copy 152 /* make sure frame can always be built ... we copy
153 * back only the requested length */ 153 * back only the requested length */
154 resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL); 154 resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
155 155
156 if (!req_data || !resp_data) { 156 if (!req_data || !resp_data) {
157 error = -ENOMEM; 157 error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
160 160
161 local_irq_disable(); 161 local_irq_disable();
162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
163 memcpy(req_data, buf, req->data_len); 163 memcpy(req_data, buf, blk_rq_bytes(req));
164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
165 local_irq_enable(); 165 local_irq_enable();
166 166
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
178 178
179 switch (req_data[1]) { 179 switch (req_data[1]) {
180 case SMP_REPORT_GENERAL: 180 case SMP_REPORT_GENERAL:
181 req->data_len -= 8; 181 req->resid_len -= 8;
182 resp_data_len -= 32; 182 rsp->resid_len -= 32;
183 resp_data[2] = SMP_RESP_FUNC_ACC; 183 resp_data[2] = SMP_RESP_FUNC_ACC;
184 resp_data[9] = sas_ha->num_phys; 184 resp_data[9] = sas_ha->num_phys;
185 break; 185 break;
186 186
187 case SMP_REPORT_MANUF_INFO: 187 case SMP_REPORT_MANUF_INFO:
188 req->data_len -= 8; 188 req->resid_len -= 8;
189 resp_data_len -= 64; 189 rsp->resid_len -= 64;
190 resp_data[2] = SMP_RESP_FUNC_ACC; 190 resp_data[2] = SMP_RESP_FUNC_ACC;
191 memcpy(resp_data + 12, shost->hostt->name, 191 memcpy(resp_data + 12, shost->hostt->name,
192 SAS_EXPANDER_VENDOR_ID_LEN); 192 SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
199 break; 199 break;
200 200
201 case SMP_DISCOVER: 201 case SMP_DISCOVER:
202 req->data_len -= 16; 202 req->resid_len -= 16;
203 if ((int)req->data_len < 0) { 203 if ((int)req->resid_len < 0) {
204 req->data_len = 0; 204 req->resid_len = 0;
205 error = -EINVAL; 205 error = -EINVAL;
206 goto out; 206 goto out;
207 } 207 }
208 resp_data_len -= 56; 208 rsp->resid_len -= 56;
209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]); 209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
210 break; 210 break;
211 211
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
215 break; 215 break;
216 216
217 case SMP_REPORT_PHY_SATA: 217 case SMP_REPORT_PHY_SATA:
218 req->data_len -= 16; 218 req->resid_len -= 16;
219 if ((int)req->data_len < 0) { 219 if ((int)req->resid_len < 0) {
220 req->data_len = 0; 220 req->resid_len = 0;
221 error = -EINVAL; 221 error = -EINVAL;
222 goto out; 222 goto out;
223 } 223 }
224 resp_data_len -= 60; 224 rsp->resid_len -= 60;
225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]); 225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
226 break; 226 break;
227 227
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
238 break; 238 break;
239 239
240 case SMP_PHY_CONTROL: 240 case SMP_PHY_CONTROL:
241 req->data_len -= 44; 241 req->resid_len -= 44;
242 if ((int)req->data_len < 0) { 242 if ((int)req->resid_len < 0) {
243 req->data_len = 0; 243 req->resid_len = 0;
244 error = -EINVAL; 244 error = -EINVAL;
245 goto out; 245 goto out;
246 } 246 }
247 resp_data_len -= 8; 247 rsp->resid_len -= 8;
248 sas_phy_control(sas_ha, req_data[9], req_data[10], 248 sas_phy_control(sas_ha, req_data[9], req_data[10],
249 req_data[32] >> 4, req_data[33] >> 4, 249 req_data[32] >> 4, req_data[33] >> 4,
250 resp_data); 250 resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
261 261
262 local_irq_disable(); 262 local_irq_disable();
263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
264 memcpy(buf, resp_data, rsp->data_len); 264 memcpy(buf, resp_data, blk_rq_bytes(rsp));
265 flush_kernel_dcache_page(bio_page(rsp->bio)); 265 flush_kernel_dcache_page(bio_page(rsp->bio));
266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
267 local_irq_enable(); 267 local_irq_enable();
268 rsp->data_len = resp_data_len;
269 268
270 out: 269 out:
271 kfree(req_data); 270 kfree(req_data);
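The hunks above move the SMP host handler from rq->data_len bookkeeping to the split this series introduces: blk_rq_bytes() for the transfer length and rq->resid_len for the residual handed back to the submitter. A minimal sketch of the per-case accounting, assuming (as the subtractions imply) that the handler first set both residuals to their full lengths; smp_account() is a hypothetical helper, not part of the patch:

#include <linux/blkdev.h>

/* Assumes req->resid_len and rsp->resid_len were initialized to
 * blk_rq_bytes(req) and blk_rq_bytes(rsp); each case in the switch
 * then subtracts what it consumed and produced.
 */
static void smp_account(struct request *req, struct request *rsp,
			unsigned int consumed, unsigned int produced)
{
	req->resid_len -= consumed;	/* request bytes actually parsed */
	rsp->resid_len -= produced;	/* response bytes actually filled */
}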
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c7..8032c5adb6a9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1312,10 +1312,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1312 uint32_t bgstat = bgf->bgstat; 1312 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0; 1313 uint64_t failing_sector = 0;
1314 1314
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1316 "bgstat=0x%x bghm=0x%x\n", 1316 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm); 1318 blk_rq_sectors(cmd->request), bgstat, bghm);
1319 1319
1320 spin_lock(&_dump_buf_lock); 1320 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) { 1321 if (!_dump_buf_done) {
@@ -2378,15 +2378,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2378 if (cmnd->cmnd[0] == READ_10) 2378 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, " 2380 "9035 BLKGRD: READ @ sector %llu, "
2381 "count %lu\n", 2381 "count %u\n",
2382 (unsigned long long)scsi_get_lba(cmnd), 2382 (unsigned long long)scsi_get_lba(cmnd),
2383 cmnd->request->nr_sectors); 2383 blk_rq_sectors(cmnd->request));
2384 else if (cmnd->cmnd[0] == WRITE_10) 2384 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, " 2386 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n", 2387 "count %u cmd=%p\n",
2388 (unsigned long long)scsi_get_lba(cmnd), 2388 (unsigned long long)scsi_get_lba(cmnd),
2389 cmnd->request->nr_sectors, 2389 blk_rq_sectors(cmnd->request),
2390 cmnd); 2390 cmnd);
2391 2391
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +2406,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2406 if (cmnd->cmnd[0] == READ_10) 2406 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, " 2408 "9040 dbg: READ @ sector %llu, "
2409 "count %lu\n", 2409 "count %u\n",
2410 (unsigned long long)scsi_get_lba(cmnd), 2410 (unsigned long long)scsi_get_lba(cmnd),
2411 cmnd->request->nr_sectors); 2411 blk_rq_sectors(cmnd->request));
2412 else if (cmnd->cmnd[0] == WRITE_10) 2412 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, " 2414 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n", 2415 "count %u cmd=%p\n",
2416 (unsigned long long)scsi_get_lba(cmnd), 2416 (unsigned long long)scsi_get_lba(cmnd),
2417 cmnd->request->nr_sectors, cmnd); 2417 blk_rq_sectors(cmnd->request), cmnd);
2418 else 2418 else
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n"); 2420 "9042 dbg: parser not implemented\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a0..5c65da519e39 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, " 1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt, 1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
1044 req->data_len, rsp->bio->bi_vcnt, rsp->data_len); 1044 blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1045 return -EINVAL; 1045 return -EINVAL;
1046 } 1046 }
1047 1047
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ?
1105 cpu_to_le64(rphy->identify.sas_address) : 1105 cpu_to_le64(rphy->identify.sas_address) :
1106 cpu_to_le64(ioc->sas_hba.sas_address); 1106 cpu_to_le64(ioc->sas_hba.sas_address);
1107 mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4); 1107 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1108 psge = &mpi_request->SGL; 1108 psge = &mpi_request->SGL;
1109 1109
1110 /* WRITE sgel first */ 1110 /* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1115 req->data_len, PCI_DMA_BIDIRECTIONAL); 1115 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1116 if (!dma_addr_out) { 1116 if (!dma_addr_out) {
1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1118 goto unmap; 1118 goto unmap;
1119 } 1119 }
1120 1120
1121 ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4), 1121 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
1122 dma_addr_out); 1122 dma_addr_out);
1123 1123
1124 /* incr sgel */ 1124 /* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1130 MPI2_SGE_FLAGS_END_OF_LIST); 1130 MPI2_SGE_FLAGS_END_OF_LIST);
1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1133 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1133 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1134 if (!dma_addr_in) { 1134 if (!dma_addr_in) {
1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1136 goto unmap; 1136 goto unmap;
1137 } 1137 }
1138 1138
1139 ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4), 1139 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
1140 dma_addr_in); 1140 dma_addr_in);
1141 1141
1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1170 1170
1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
1172 req->sense_len = sizeof(*mpi_reply); 1172 req->sense_len = sizeof(*mpi_reply);
1173 req->data_len = 0; 1173 req->resid_len = 0;
1174 rsp->data_len -= mpi_reply->ResponseDataLength; 1174 rsp->resid_len -= mpi_reply->ResponseDataLength;
1175
1176 } else { 1175 } else {
1177 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1176 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1178 "%s - no reply\n", ioc->name, __func__)); 1177 "%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1188 1187
1189 unmap: 1188 unmap:
1190 if (dma_addr_out) 1189 if (dma_addr_out)
1191 pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len, 1190 pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
1192 PCI_DMA_BIDIRECTIONAL); 1191 PCI_DMA_BIDIRECTIONAL);
1193 if (dma_addr_in) 1192 if (dma_addr_in)
1194 pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len, 1193 pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
1195 PCI_DMA_BIDIRECTIONAL); 1194 PCI_DMA_BIDIRECTIONAL);
1196 1195
1197 out: 1196 out:
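In mpt2sas the request length feeds straight into DMA mapping, which only works because the handler rejected multi-segment bios earlier in the function. A sketch of that single-segment mapping, with map_smp_payload() as a hypothetical wrapper:

#include <linux/pci.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Valid only when the caller has verified req->bio->bi_vcnt == 1, as
 * transport_smp_handler() does above.
 */
static dma_addr_t map_smp_payload(struct pci_dev *pdev, struct request *req)
{
	return pci_map_single(pdev, bio_data(req->bio),
			      blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
}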
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab2..5776b2ab6b12 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -889,26 +889,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
889} 889}
890EXPORT_SYMBOL(osd_req_add_set_attr_list); 890EXPORT_SYMBOL(osd_req_add_set_attr_list);
891 891
892static int _append_map_kern(struct request *req,
893 void *buff, unsigned len, gfp_t flags)
894{
895 struct bio *bio;
896 int ret;
897
898 bio = bio_map_kern(req->q, buff, len, flags);
899 if (IS_ERR(bio)) {
900 OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
901 PTR_ERR(bio));
902 return PTR_ERR(bio);
903 }
904 ret = blk_rq_append_bio(req->q, req, bio);
905 if (ret) {
906 OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
907 bio_put(bio);
908 }
909 return ret;
910}
911
912static int _req_append_segment(struct osd_request *or, 892static int _req_append_segment(struct osd_request *or,
913 unsigned padding, struct _osd_req_data_segment *seg, 893 unsigned padding, struct _osd_req_data_segment *seg,
914 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io) 894 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +904,14 @@ static int _req_append_segment(struct osd_request *or,
924 else 904 else
925 pad_buff = io->pad_buff; 905 pad_buff = io->pad_buff;
926 906
927 ret = _append_map_kern(io->req, pad_buff, padding, 907 ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
928 or->alloc_flags); 908 or->alloc_flags);
929 if (ret) 909 if (ret)
930 return ret; 910 return ret;
931 io->total_bytes += padding; 911 io->total_bytes += padding;
932 } 912 }
933 913
934 ret = _append_map_kern(io->req, seg->buff, seg->total_bytes, 914 ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
935 or->alloc_flags); 915 or->alloc_flags);
936 if (ret) 916 if (ret)
937 return ret; 917 return ret;
@@ -1293,6 +1273,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1293/* 1273/*
1294 * osd_finalize_request and helpers 1274 * osd_finalize_request and helpers
1295 */ 1275 */
1276static struct request *_make_request(struct request_queue *q, bool has_write,
1277 struct _osd_io_info *oii, gfp_t flags)
1278{
1279 if (oii->bio)
1280 return blk_make_request(q, oii->bio, flags);
1281 else {
1282 struct request *req;
1283
1284 req = blk_get_request(q, has_write ? WRITE : READ, flags);
1285 if (unlikely(!req))
1286 return ERR_PTR(-ENOMEM);
1287
1288 return req;
1289 }
1290}
1296 1291
1297static int _init_blk_request(struct osd_request *or, 1292static int _init_blk_request(struct osd_request *or,
1298 bool has_in, bool has_out) 1293 bool has_in, bool has_out)
@@ -1301,11 +1296,13 @@ static int _init_blk_request(struct osd_request *or,
1301 struct scsi_device *scsi_device = or->osd_dev->scsi_device; 1296 struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1302 struct request_queue *q = scsi_device->request_queue; 1297 struct request_queue *q = scsi_device->request_queue;
1303 struct request *req; 1298 struct request *req;
1304 int ret = -ENOMEM; 1299 int ret;
1305 1300
1306 req = blk_get_request(q, has_out, flags); 1301 req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1307 if (!req) 1302 if (IS_ERR(req)) {
1303 ret = PTR_ERR(req);
1308 goto out; 1304 goto out;
1305 }
1309 1306
1310 or->request = req; 1307 or->request = req;
1311 req->cmd_type = REQ_TYPE_BLOCK_PC; 1308 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1318,9 +1315,10 @@ static int _init_blk_request(struct osd_request *or,
1318 or->out.req = req; 1315 or->out.req = req;
1319 if (has_in) { 1316 if (has_in) {
1320 /* allocate bidi request */ 1317 /* allocate bidi request */
1321 req = blk_get_request(q, READ, flags); 1318 req = _make_request(q, false, &or->in, flags);
1322 if (!req) { 1319 if (IS_ERR(req)) {
1323 OSD_DEBUG("blk_get_request for bidi failed\n"); 1320 OSD_DEBUG("blk_get_request for bidi failed\n");
1321 ret = PTR_ERR(req);
1324 goto out; 1322 goto out;
1325 } 1323 }
1326 req->cmd_type = REQ_TYPE_BLOCK_PC; 1324 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1364,26 +1362,6 @@ int osd_finalize_request(struct osd_request *or,
1364 return ret; 1362 return ret;
1365 } 1363 }
1366 1364
1367 if (or->out.bio) {
1368 ret = blk_rq_append_bio(or->request->q, or->out.req,
1369 or->out.bio);
1370 if (ret) {
1371 OSD_DEBUG("blk_rq_append_bio out failed\n");
1372 return ret;
1373 }
1374 OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
1375 _LLU(or->out.total_bytes), or->out.req->data_len);
1376 }
1377 if (or->in.bio) {
1378 ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
1379 if (ret) {
1380 OSD_DEBUG("blk_rq_append_bio in failed\n");
1381 return ret;
1382 }
1383 OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
1384 _LLU(or->in.total_bytes), or->in.req->data_len);
1385 }
1386
1387 or->out.pad_buff = sg_out_pad_buffer; 1365 or->out.pad_buff = sg_out_pad_buffer;
1388 or->in.pad_buff = sg_in_pad_buffer; 1366 or->in.pad_buff = sg_in_pad_buffer;
1389 1367
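osd_initiator drops its open-coded bio_map_kern() + blk_rq_append_bio() helper in favor of blk_rq_map_kern(), and allocates the request with blk_make_request() when a bio chain already exists. A sketch of the combined pattern under those assumptions (build_pc_req() is hypothetical; bio is a pre-built chain):

#include <linux/err.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Allocate a BLOCK_PC request around an existing bio chain, then append
 * a kernel buffer as a further segment.
 */
static struct request *build_pc_req(struct request_queue *q, struct bio *bio,
				    void *buf, unsigned int len, gfp_t gfp)
{
	struct request *req = blk_make_request(q, bio, gfp);
	int ret;

	if (IS_ERR(req))
		return req;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	ret = blk_rq_map_kern(q, req, buf, len, gfp);	/* appends a bio */
	if (ret) {
		blk_put_request(req);
		return ERR_PTR(ret);
	}
	return req;
}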
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e98..dd3f9d2b99fd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
240 * is invalid. Prevent the garbage from being misinterpreted 240 * is invalid. Prevent the garbage from being misinterpreted
241 * and prevent security leaks by zeroing out the excess data. 241 * and prevent security leaks by zeroing out the excess data.
242 */ 242 */
243 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 243 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 244 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245 245
246 if (resid) 246 if (resid)
247 *resid = req->data_len; 247 *resid = req->resid_len;
248 ret = req->errors; 248 ret = req->errors;
249 out: 249 out:
250 blk_put_request(req); 250 blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
546 * to queue the remainder of them. 546 * to queue the remainder of them.
547 */ 547 */
548 if (blk_end_request(req, error, bytes)) { 548 if (blk_end_request(req, error, bytes)) {
549 int leftover = (req->hard_nr_sectors << 9);
550
551 if (blk_pc_request(req))
552 leftover = req->data_len;
553
554 /* kill remainder if no retrys */ 549 /* kill remainder if no retrys */
555 if (error && scsi_noretry_cmd(cmd)) 550 if (error && scsi_noretry_cmd(cmd))
556 blk_end_request(req, error, leftover); 551 blk_end_request_all(req, error);
557 else { 552 else {
558 if (requeue) { 553 if (requeue) {
559 /* 554 /*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
673EXPORT_SYMBOL(scsi_release_buffers); 668EXPORT_SYMBOL(scsi_release_buffers);
674 669
675/* 670/*
676 * Bidi commands Must be complete as a whole, both sides at once.
677 * If part of the bytes were written and lld returned
678 * scsi_in()->resid and/or scsi_out()->resid this information will be left
679 * in req->data_len and req->next_rq->data_len. The upper-layer driver can
680 * decide what to do with this information.
681 */
682static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
683{
684 struct request *req = cmd->request;
685 unsigned int dlen = req->data_len;
686 unsigned int next_dlen = req->next_rq->data_len;
687
688 req->data_len = scsi_out(cmd)->resid;
689 req->next_rq->data_len = scsi_in(cmd)->resid;
690
691 /* The req and req->next_rq have not been completed */
692 BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
693
694 scsi_release_buffers(cmd);
695
696 /*
697 * This will goose the queue request function at the end, so we don't
698 * need to worry about launching another command.
699 */
700 scsi_next_command(cmd);
701}
702
703/*
704 * Function: scsi_io_completion() 671 * Function: scsi_io_completion()
705 * 672 *
706 * Purpose: Completion processing for block device I/O requests. 673 * Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
739void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 706void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
740{ 707{
741 int result = cmd->result; 708 int result = cmd->result;
742 int this_count;
743 struct request_queue *q = cmd->device->request_queue; 709 struct request_queue *q = cmd->device->request_queue;
744 struct request *req = cmd->request; 710 struct request *req = cmd->request;
745 int error = 0; 711 int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 if (!sense_deferred) 739 if (!sense_deferred)
774 error = -EIO; 740 error = -EIO;
775 } 741 }
742
743 req->resid_len = scsi_get_resid(cmd);
744
776 if (scsi_bidi_cmnd(cmd)) { 745 if (scsi_bidi_cmnd(cmd)) {
777 /* will also release_buffers */ 746 /*
778 scsi_end_bidi_request(cmd); 747 * Bidi commands Must be complete as a whole,
748 * both sides at once.
749 */
750 req->next_rq->resid_len = scsi_in(cmd)->resid;
751
752 blk_end_request_all(req, 0);
753
754 scsi_release_buffers(cmd);
755 scsi_next_command(cmd);
779 return; 756 return;
780 } 757 }
781 req->data_len = scsi_get_resid(cmd);
782 } 758 }
783 759
784 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 760 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
787 * Next deal with any sectors which we were able to correctly 763 * Next deal with any sectors which we were able to correctly
788 * handle. 764 * handle.
789 */ 765 */
790 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 766 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
791 "%d bytes done.\n", 767 "%d bytes done.\n",
792 req->nr_sectors, good_bytes)); 768 blk_rq_sectors(req), good_bytes));
793 769
794 /* 770 /*
795 * Recovered errors need reporting, but they're always treated 771 * Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
812 */ 788 */
813 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 789 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
814 return; 790 return;
815 this_count = blk_rq_bytes(req);
816 791
817 error = -EIO; 792 error = -EIO;
818 793
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
922 if (driver_byte(result) & DRIVER_SENSE) 897 if (driver_byte(result) & DRIVER_SENSE)
923 scsi_print_sense("", cmd); 898 scsi_print_sense("", cmd);
924 } 899 }
925 blk_end_request(req, -EIO, blk_rq_bytes(req)); 900 blk_end_request_all(req, -EIO);
926 scsi_next_command(cmd); 901 scsi_next_command(cmd);
927 break; 902 break;
928 case ACTION_REPREP: 903 case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
965 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 940 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
966 BUG_ON(count > sdb->table.nents); 941 BUG_ON(count > sdb->table.nents);
967 sdb->table.nents = count; 942 sdb->table.nents = count;
968 if (blk_pc_request(req)) 943 sdb->length = blk_rq_bytes(req);
969 sdb->length = req->data_len;
970 else
971 sdb->length = req->nr_sectors << 9;
972 return BLKPREP_OK; 944 return BLKPREP_OK;
973} 945}
974 946
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1087 if (unlikely(ret)) 1059 if (unlikely(ret))
1088 return ret; 1060 return ret;
1089 } else { 1061 } else {
1090 BUG_ON(req->data_len); 1062 BUG_ON(blk_rq_bytes(req));
1091 BUG_ON(req->data);
1092 1063
1093 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1064 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1094 req->buffer = NULL; 1065 req->buffer = NULL;
1095 } 1066 }
1096 1067
1097 cmd->cmd_len = req->cmd_len; 1068 cmd->cmd_len = req->cmd_len;
1098 if (!req->data_len) 1069 if (!blk_rq_bytes(req))
1099 cmd->sc_data_direction = DMA_NONE; 1070 cmd->sc_data_direction = DMA_NONE;
1100 else if (rq_data_dir(req) == WRITE) 1071 else if (rq_data_dir(req) == WRITE)
1101 cmd->sc_data_direction = DMA_TO_DEVICE; 1072 cmd->sc_data_direction = DMA_TO_DEVICE;
1102 else 1073 else
1103 cmd->sc_data_direction = DMA_FROM_DEVICE; 1074 cmd->sc_data_direction = DMA_FROM_DEVICE;
1104 1075
1105 cmd->transfersize = req->data_len; 1076 cmd->transfersize = blk_rq_bytes(req);
1106 cmd->allowed = req->retries; 1077 cmd->allowed = req->retries;
1107 return BLKPREP_OK; 1078 return BLKPREP_OK;
1108} 1079}
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1212 break; 1183 break;
1213 case BLKPREP_DEFER: 1184 case BLKPREP_DEFER:
1214 /* 1185 /*
1215 * If we defer, the elv_next_request() returns NULL, but the 1186 * If we defer, the blk_peek_request() returns NULL, but the
1216 * queue must be restarted, so we plug here if no returning 1187 * queue must be restarted, so we plug here if no returning
1217 * command will automatically do that. 1188 * command will automatically do that.
1218 */ 1189 */
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1388 struct scsi_target *starget = scsi_target(sdev); 1359 struct scsi_target *starget = scsi_target(sdev);
1389 struct Scsi_Host *shost = sdev->host; 1360 struct Scsi_Host *shost = sdev->host;
1390 1361
1391 blkdev_dequeue_request(req); 1362 blk_start_request(req);
1392 1363
1393 if (unlikely(cmd == NULL)) { 1364 if (unlikely(cmd == NULL)) {
1394 printk(KERN_CRIT "impossible request in %s.\n", 1365 printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
1480 1451
1481 if (!sdev) { 1452 if (!sdev) {
1482 printk("scsi: killing requests for dead queue\n"); 1453 printk("scsi: killing requests for dead queue\n");
1483 while ((req = elv_next_request(q)) != NULL) 1454 while ((req = blk_peek_request(q)) != NULL)
1484 scsi_kill_request(req, q); 1455 scsi_kill_request(req, q);
1485 return; 1456 return;
1486 } 1457 }
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
1501 * that the request is fully prepared even if we cannot 1472 * that the request is fully prepared even if we cannot
1502 * accept it. 1473 * accept it.
1503 */ 1474 */
1504 req = elv_next_request(q); 1475 req = blk_peek_request(q);
1505 if (!req || !scsi_dev_queue_ready(q, sdev)) 1476 if (!req || !scsi_dev_queue_ready(q, sdev))
1506 break; 1477 break;
1507 1478
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
1517 * Remove the request from the request list. 1488 * Remove the request from the request list.
1518 */ 1489 */
1519 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1490 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1520 blkdev_dequeue_request(req); 1491 blk_start_request(req);
1521 sdev->device_busy++; 1492 sdev->device_busy++;
1522 1493
1523 spin_unlock(q->queue_lock); 1494 spin_unlock(q->queue_lock);
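Besides the resid_len and blk_rq_bytes() conversions, scsi_lib moves to the new queue-processing primitives: blk_peek_request() looks at the head request without dequeueing it, and blk_start_request() dequeues it (and arms its timeout) once the driver commits. The canonical request_fn shape, with my_can_queue()/my_dispatch() as stubbed hypothetical driver hooks:

#include <linux/blkdev.h>

/* hypothetical driver hooks, stubbed for the sketch */
static int my_can_queue(struct request_queue *q) { return 1; }
static void my_dispatch(struct request *req) { }

static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (!my_can_queue(q))
			break;			/* leave it queued */
		blk_start_request(req);		/* dequeue + start timeout */
		my_dispatch(req);
	}
}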
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6a..10303272ba45 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the 387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
388 * length for us. 388 * length for us.
389 */ 389 */
390 cmd->sdb.length = rq->data_len; 390 cmd->sdb.length = blk_rq_bytes(rq);
391 391
392 return 0; 392 return 0;
393 393
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2d..d606452297cf 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
164 164
165 while (!blk_queue_plugged(q)) { 165 while (!blk_queue_plugged(q)) {
166 req = elv_next_request(q); 166 req = blk_fetch_request(q);
167 if (!req) 167 if (!req)
168 break; 168 break;
169 169
170 blkdev_dequeue_request(req);
171
172 spin_unlock_irq(q->queue_lock); 170 spin_unlock_irq(q->queue_lock);
173 171
174 handler = to_sas_internal(shost->transportt)->f->smp_handler; 172 handler = to_sas_internal(shost->transportt)->f->smp_handler;
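Where a driver always consumes the request it peeks at, blk_fetch_request() folds peek and start into one call, which is why the SMP queue loop above loses its separate blkdev_dequeue_request(). For example (drain_dead_queue() is hypothetical; the queue lock is assumed held, hence the __-prefixed completion):

#include <linux/blkdev.h>

static void drain_dead_queue(struct request_queue *q)
{
	struct request *req;

	/* fetch = peek + start; fine when every request is consumed */
	while ((req = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(req, -ENODEV);
}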
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b637..40d2860f235a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
384 struct scsi_device *sdp = q->queuedata; 384 struct scsi_device *sdp = q->queuedata;
385 struct gendisk *disk = rq->rq_disk; 385 struct gendisk *disk = rq->rq_disk;
386 struct scsi_disk *sdkp; 386 struct scsi_disk *sdkp;
387 sector_t block = rq->sector; 387 sector_t block = blk_rq_pos(rq);
388 sector_t threshold; 388 sector_t threshold;
389 unsigned int this_count = rq->nr_sectors; 389 unsigned int this_count = blk_rq_sectors(rq);
390 int ret, host_dif; 390 int ret, host_dif;
391 391
392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
413 this_count)); 413 this_count));
414 414
415 if (!sdp || !scsi_device_online(sdp) || 415 if (!sdp || !scsi_device_online(sdp) ||
416 block + rq->nr_sectors > get_capacity(disk)) { 416 block + blk_rq_sectors(rq) > get_capacity(disk)) {
417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
418 "Finishing %ld sectors\n", 418 "Finishing %u sectors\n",
419 rq->nr_sectors)); 419 blk_rq_sectors(rq)));
420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
421 "Retry with 0x%p\n", SCpnt)); 421 "Retry with 0x%p\n", SCpnt));
422 goto out; 422 goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
463 * for this. 463 * for this.
464 */ 464 */
465 if (sdp->sector_size == 1024) { 465 if (sdp->sector_size == 1024) {
466 if ((block & 1) || (rq->nr_sectors & 1)) { 466 if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
467 scmd_printk(KERN_ERR, SCpnt, 467 scmd_printk(KERN_ERR, SCpnt,
468 "Bad block number requested\n"); 468 "Bad block number requested\n");
469 goto out; 469 goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
473 } 473 }
474 } 474 }
475 if (sdp->sector_size == 2048) { 475 if (sdp->sector_size == 2048) {
476 if ((block & 3) || (rq->nr_sectors & 3)) { 476 if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
477 scmd_printk(KERN_ERR, SCpnt, 477 scmd_printk(KERN_ERR, SCpnt,
478 "Bad block number requested\n"); 478 "Bad block number requested\n");
479 goto out; 479 goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
483 } 483 }
484 } 484 }
485 if (sdp->sector_size == 4096) { 485 if (sdp->sector_size == 4096) {
486 if ((block & 7) || (rq->nr_sectors & 7)) { 486 if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
487 scmd_printk(KERN_ERR, SCpnt, 487 scmd_printk(KERN_ERR, SCpnt,
488 "Bad block number requested\n"); 488 "Bad block number requested\n");
489 goto out; 489 goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
512 } 512 }
513 513
514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
515 "%s %d/%ld 512 byte blocks.\n", 515 "%s %d/%u 512 byte blocks.\n",
516 (rq_data_dir(rq) == WRITE) ? 516 (rq_data_dir(rq) == WRITE) ?
517 "writing" : "reading", this_count, 517 "writing" : "reading", this_count,
518 rq->nr_sectors)); 518 blk_rq_sectors(rq)));
519 519
520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
971 971
972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
973{ 973{
974 u64 start_lba = scmd->request->sector; 974 u64 start_lba = blk_rq_pos(scmd->request);
975 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 975 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
976 u64 bad_lba; 976 u64 bad_lba;
977 int info_valid; 977 int info_valid;
978 978
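sd_prep_fn's three alignment checks differ only in the mask, which follows from the sector-size ratio: 1024-byte sectors span 2 blocks (mask 1), 2048 span 4 (mask 3), 4096 span 8 (mask 7). They could be collapsed as in this hypothetical helper:

#include <linux/blkdev.h>

static int rq_misaligned(struct request *rq, unsigned int sector_size)
{
	unsigned int mask = (sector_size >> 9) - 1; /* 1024->1, 2048->3, 4096->7 */

	return (blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask);
}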
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff492797..82f14a9482d0 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
507 sector_sz = scmd->device->sector_size; 507 sector_sz = scmd->device->sector_size;
508 sectors = good_bytes / sector_sz; 508 sectors = good_bytes / sector_sz;
509 509
510 phys = scmd->request->sector & 0xffffffff; 510 phys = blk_rq_pos(scmd->request) & 0xffffffff;
511 if (sector_sz == 4096) 511 if (sector_sz == 4096)
512 phys >>= 3; 512 phys >>= 3;
513 513
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e1716f14cd47..0fc2c0ae7691 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1260,7 +1260,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1260 1260
1261 sense = rq->sense; 1261 sense = rq->sense;
1262 result = rq->errors; 1262 result = rq->errors;
1263 resid = rq->data_len; 1263 resid = rq->resid_len;
1264 1264
1265 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1265 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1266 sdp->disk->disk_name, srp->header.pack_id, result)); 1266 sdp->disk->disk_name, srp->header.pack_id, result));
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad5..fddba53c7fe5 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
292 if (cd->device->sector_size == 2048) 292 if (cd->device->sector_size == 2048)
293 error_sector <<= 2; 293 error_sector <<= 2;
294 error_sector &= ~(block_sectors - 1); 294 error_sector &= ~(block_sectors - 1);
295 good_bytes = (error_sector - SCpnt->request->sector) << 9; 295 good_bytes = (error_sector -
296 blk_rq_pos(SCpnt->request)) << 9;
296 if (good_bytes < 0 || good_bytes >= this_count) 297 if (good_bytes < 0 || good_bytes >= this_count)
297 good_bytes = 0; 298 good_bytes = 0;
298 /* 299 /*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
349 cd->disk->disk_name, block)); 350 cd->disk->disk_name, block));
350 351
351 if (!cd->device || !scsi_device_online(cd->device)) { 352 if (!cd->device || !scsi_device_online(cd->device)) {
352 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 353 SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
353 rq->nr_sectors)); 354 blk_rq_sectors(rq)));
354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 355 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
355 goto out; 356 goto out;
356 } 357 }
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
413 /* 414 /*
414 * request doesn't start on hw block boundary, add scatter pads 415 * request doesn't start on hw block boundary, add scatter pads
415 */ 416 */
416 if (((unsigned int)rq->sector % (s_size >> 9)) || 417 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
417 (scsi_bufflen(SCpnt) % s_size)) { 418 (scsi_bufflen(SCpnt) % s_size)) {
418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); 419 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
419 goto out; 420 goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); 423 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
423 424
424 425
425 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
426 cd->cdi.name, 427 cd->cdi.name,
427 (rq_data_dir(rq) == WRITE) ? 428 (rq_data_dir(rq) == WRITE) ?
428 "writing" : "reading", 429 "writing" : "reading",
429 this_count, rq->nr_sectors)); 430 this_count, blk_rq_sectors(rq)));
430 431
431 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
432 block = (unsigned int)rq->sector / (s_size >> 9); 433 block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
433 434
434 if (this_count > 0xffff) { 435 if (this_count > 0xffff) {
435 this_count = 0xffff; 436 this_count = 0xffff;
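sr keeps issuing READ_10/WRITE_10 in device blocks, so the start sector from blk_rq_pos() (512-byte units) is divided by s_size >> 9 to get the block number. As a one-line sketch (rq_to_cd_block() is hypothetical):

#include <linux/blkdev.h>

/* s_size is the device sector size, typically 2048 for CD media */
static unsigned int rq_to_cd_block(struct request *rq, int s_size)
{
	return (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
}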
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f14..8681b708344f 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
463 struct scsi_tape *STp = SRpnt->stp; 463 struct scsi_tape *STp = SRpnt->stp;
464 464
465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
466 STp->buffer->cmdstat.residual = req->data_len; 466 STp->buffer->cmdstat.residual = req->resid_len;
467 467
468 if (SRpnt->waiting) 468 if (SRpnt->waiting)
469 complete(SRpnt->waiting); 469 complete(SRpnt->waiting);
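sg (further up) and st only ever wanted the residual at completion; with data_len split into blk_rq_bytes() (length) and resid_len (residual), both now read the latter. A sketch of such an end_io callback (my_end_io() and the printout are illustrative only):

#include <linux/blkdev.h>

static void my_end_io(struct request *rq, int uptodate)
{
	int result = rq->errors;		/* SCSI result, as sg/st use it */
	unsigned int resid = rq->resid_len;	/* bytes not transferred */

	printk(KERN_DEBUG "req done: res=0x%x resid=%u\n", result, resid);
}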
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cbe..54023d41fd15 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
1306 if (linked_comm && SCpnt->device->queue_depth > 2 1306 if (linked_comm && SCpnt->device->queue_depth > 2
1307 && TLDEV(SCpnt->device->type)) { 1307 && TLDEV(SCpnt->device->type)) {
1308 HD(j)->cp_stat[i] = READY; 1308 HD(j)->cp_stat[i] = READY;
1309 flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE); 1309 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
1310 return 0; 1310 return 0;
1311 } 1311 }
1312 1312
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
1610 1610
1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE; 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
1612 1612
1613 if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector; 1613 if (blk_rq_pos(SCpnt->request) < minsec)
1614 if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector; 1614 minsec = blk_rq_pos(SCpnt->request);
1615 if (blk_rq_pos(SCpnt->request) > maxsec)
1616 maxsec = blk_rq_pos(SCpnt->request);
1615 1617
1616 sl[n] = SCpnt->request->sector; 1618 sl[n] = blk_rq_pos(SCpnt->request);
1617 ioseek += SCpnt->request->nr_sectors; 1619 ioseek += blk_rq_sectors(SCpnt->request);
1618 1620
1619 if (!n) continue; 1621 if (!n) continue;
1620 1622
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
1642 1644
1643 if (!input_only) for (n = 0; n < n_ready; n++) { 1645 if (!input_only) for (n = 0; n < n_ready; n++) {
1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1646 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1645 ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number; 1647 ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
1646 1648
1647 if (!n) continue; 1649 if (!n) continue;
1648 1650
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
1666 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1668 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1667 for (n = 0; n < n_ready; n++) { 1669 for (n = 0; n < n_ready; n++) {
1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1670 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1669 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\ 1671 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1672 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1671 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1673 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1674 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
1673 SCpnt->request->sector, SCpnt->request->nr_sectors, cursec, 1675 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1674 YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1676 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1675 YESNO(overlap), cpp->xdir); 1677 YESNO(overlap), cpp->xdir);
1676 } 1678 }
1677#endif 1679#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
1799 1801
1800 if (linked_comm && SCpnt->device->queue_depth > 2 1802 if (linked_comm && SCpnt->device->queue_depth > 2
1801 && TLDEV(SCpnt->device->type)) 1803 && TLDEV(SCpnt->device->type))
1802 flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE); 1804 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1803 1805
1804 tstatus = status_byte(spp->target_status); 1806 tstatus = status_byte(spp->target_status);
1805 1807