-rw-r--r--  arch/arm/plat-omap/mailbox.c | 63
-rw-r--r--  arch/um/drivers/ubd_kern.c | 36
-rw-r--r--  block/Kconfig | 11
-rw-r--r--  block/as-iosched.c | 24
-rw-r--r--  block/blk-barrier.c | 19
-rw-r--r--  block/blk-core.c | 740
-rw-r--r--  block/blk-exec.c | 1
-rw-r--r--  block/blk-map.c | 21
-rw-r--r--  block/blk-merge.c | 46
-rw-r--r--  block/blk-tag.c | 17
-rw-r--r--  block/blk-timeout.c | 22
-rw-r--r--  block/blk.h | 51
-rw-r--r--  block/bsg.c | 8
-rw-r--r--  block/cfq-iosched.c | 36
-rw-r--r--  block/deadline-iosched.c | 2
-rw-r--r--  block/elevator.c | 167
-rw-r--r--  block/scsi_ioctl.c | 5
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/block/DAC960.c | 10
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/amiflop.c | 54
-rw-r--r--  drivers/block/ataflop.c | 66
-rw-r--r--  drivers/block/cciss.c | 39
-rw-r--r--  drivers/block/cpqarray.c | 16
-rw-r--r--  drivers/block/floppy.c | 58
-rw-r--r--  drivers/block/hd.c | 104
-rw-r--r--  drivers/block/loop.c | 37
-rw-r--r--  drivers/block/mg_disk.c | 535
-rw-r--r--  drivers/block/nbd.c | 23
-rw-r--r--  drivers/block/paride/pcd.c | 29
-rw-r--r--  drivers/block/paride/pd.c | 22
-rw-r--r--  drivers/block/paride/pf.c | 47
-rw-r--r--  drivers/block/ps3disk.c | 22
-rw-r--r--  drivers/block/sunvdc.c | 14
-rw-r--r--  drivers/block/swim.c | 48
-rw-r--r--  drivers/block/swim3.c | 107
-rw-r--r--  drivers/block/sx8.c | 17
-rw-r--r--  drivers/block/ub.c | 48
-rw-r--r--  drivers/block/viodasd.c | 12
-rw-r--r--  drivers/block/virtio_blk.c | 75
-rw-r--r--  drivers/block/xd.c | 41
-rw-r--r--  drivers/block/xen-blkfront.c | 32
-rw-r--r--  drivers/block/xsysace.c | 44
-rw-r--r--  drivers/block/z2ram.c | 19
-rw-r--r--  drivers/cdrom/gdrom.c | 34
-rw-r--r--  drivers/cdrom/viocd.c | 29
-rw-r--r--  drivers/ide/ide-atapi.c | 177
-rw-r--r--  drivers/ide/ide-cd.c | 140
-rw-r--r--  drivers/ide/ide-cd.h | 4
-rw-r--r--  drivers/ide/ide-disk.c | 9
-rw-r--r--  drivers/ide/ide-dma.c | 22
-rw-r--r--  drivers/ide/ide-floppy.c | 32
-rw-r--r--  drivers/ide/ide-io.c | 57
-rw-r--r--  drivers/ide/ide-ioctls.c | 1
-rw-r--r--  drivers/ide/ide-lib.c | 2
-rw-r--r--  drivers/ide/ide-park.c | 7
-rw-r--r--  drivers/ide/ide-pm.c | 38
-rw-r--r--  drivers/ide/ide-tape.c | 735
-rw-r--r--  drivers/ide/ide-taskfile.c | 20
-rw-r--r--  drivers/ide/pdc202xx_old.c | 2
-rw-r--r--  drivers/ide/tc86c001.c | 2
-rw-r--r--  drivers/ide/tx4939ide.c | 2
-rw-r--r--  drivers/memstick/core/mspro_block.c | 17
-rw-r--r--  drivers/message/fusion/mptsas.c | 22
-rw-r--r--  drivers/message/i2o/i2o_block.c | 38
-rw-r--r--  drivers/mmc/card/block.c | 10
-rw-r--r--  drivers/mmc/card/queue.c | 11
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 41
-rw-r--r--  drivers/s390/block/dasd.c | 35
-rw-r--r--  drivers/s390/block/dasd_diag.c | 5
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 6
-rw-r--r--  drivers/s390/block/dasd_fba.c | 7
-rw-r--r--  drivers/s390/char/tape_34xx.c | 2
-rw-r--r--  drivers/s390/char/tape_3590.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 24
-rw-r--r--  drivers/sbus/char/jsflash.c | 26
-rw-r--r--  drivers/scsi/eata.c | 24
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 16
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 49
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 22
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 23
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 72
-rw-r--r--  drivers/scsi/scsi_lib.c | 87
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/sd.c | 24
-rw-r--r--  drivers/scsi/sd_dif.c | 2
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/sr.c | 15
-rw-r--r--  drivers/scsi/st.c | 2
-rw-r--r--  drivers/scsi/u14-34f.c | 22
-rw-r--r--  fs/bio.c | 2
-rw-r--r--  fs/block_dev.c | 7
-rw-r--r--  fs/coda/file.c | 9
-rw-r--r--  fs/exofs/osd.c | 4
-rw-r--r--  fs/pipe.c | 14
-rw-r--r--  fs/read_write.c | 7
-rw-r--r--  fs/splice.c | 338
-rw-r--r--  include/linux/bio.h | 8
-rw-r--r--  include/linux/blkdev.h | 119
-rw-r--r--  include/linux/elevator.h | 4
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/ide.h | 27
-rw-r--r--  include/linux/loop.h | 3
-rw-r--r--  include/linux/mg_disk.h | 206
-rw-r--r--  include/linux/pipe_fs_i.h | 1
-rw-r--r--  include/linux/splice.h | 3
-rw-r--r--  include/linux/virtio_blk.h | 8
-rw-r--r--  include/scsi/scsi_cmnd.h | 2
-rw-r--r--  kernel/trace/blktrace.c | 16
110 files changed, 2604 insertions, 2893 deletions
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 0abfbaa5987..40424edae93 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -147,24 +147,40 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void *arg)
 	return ret;
 }
 
+struct omap_msg_tx_data {
+	mbox_msg_t msg;
+	void *arg;
+};
+
+static void omap_msg_tx_end_io(struct request *rq, int error)
+{
+	kfree(rq->special);
+	__blk_put_request(rq->q, rq);
+}
+
 int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
 {
+	struct omap_msg_tx_data *tx_data;
 	struct request *rq;
 	struct request_queue *q = mbox->txq->queue;
-	int ret = 0;
+
+	tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
+	if (unlikely(!tx_data))
+		return -ENOMEM;
 
 	rq = blk_get_request(q, WRITE, GFP_ATOMIC);
 	if (unlikely(!rq)) {
-		ret = -ENOMEM;
-		goto fail;
+		kfree(tx_data);
+		return -ENOMEM;
 	}
 
-	rq->data = (void *)msg;
-	blk_insert_request(q, rq, 0, arg);
+	tx_data->msg = msg;
+	tx_data->arg = arg;
+	rq->end_io = omap_msg_tx_end_io;
+	blk_insert_request(q, rq, 0, tx_data);
 
 	schedule_work(&mbox->txq->work);
-fail:
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(omap_mbox_msg_send);
 
@@ -178,22 +194,28 @@ static void mbox_tx_work(struct work_struct *work)
 	struct request_queue *q = mbox->txq->queue;
 
 	while (1) {
+		struct omap_msg_tx_data *tx_data;
+
 		spin_lock(q->queue_lock);
-		rq = elv_next_request(q);
+		rq = blk_fetch_request(q);
 		spin_unlock(q->queue_lock);
 
 		if (!rq)
			break;
 
-		ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special);
+		tx_data = rq->special;
+
+		ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
 		if (ret) {
 			enable_mbox_irq(mbox, IRQ_TX);
+			spin_lock(q->queue_lock);
+			blk_requeue_request(q, rq);
+			spin_unlock(q->queue_lock);
 			return;
 		}
 
 		spin_lock(q->queue_lock);
-		if (__blk_end_request(rq, 0, 0))
-			BUG();
+		__blk_end_request_all(rq, 0);
 		spin_unlock(q->queue_lock);
 	}
 }
@@ -218,16 +240,13 @@ static void mbox_rx_work(struct work_struct *work)
 
 	while (1) {
 		spin_lock_irqsave(q->queue_lock, flags);
-		rq = elv_next_request(q);
+		rq = blk_fetch_request(q);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 		if (!rq)
 			break;
 
-		msg = (mbox_msg_t) rq->data;
-
-		if (blk_end_request(rq, 0, 0))
-			BUG();
-
+		msg = (mbox_msg_t)rq->special;
+		blk_end_request_all(rq, 0);
 		mbox->rxq->callback((void *)msg);
 	}
 }
@@ -264,7 +283,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
 			goto nomem;
 
 		msg = mbox_fifo_read(mbox);
-		rq->data = (void *)msg;
 
 		if (unlikely(mbox_seq_test(mbox, msg))) {
 			pr_info("mbox: Illegal seq bit!(%08x)\n", msg);
@@ -272,7 +290,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
 			mbox->err_notify();
 		}
 
-		blk_insert_request(q, rq, 0, NULL);
+		blk_insert_request(q, rq, 0, (void *)msg);
 		if (mbox->ops->type == OMAP_MBOX_TYPE1)
 			break;
 	}
@@ -329,16 +347,15 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
 
 	while (1) {
 		spin_lock_irqsave(q->queue_lock, flags);
-		rq = elv_next_request(q);
+		rq = blk_fetch_request(q);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 
 		if (!rq)
 			break;
 
-		*p = (mbox_msg_t) rq->data;
+		*p = (mbox_msg_t)rq->special;
 
-		if (blk_end_request(rq, 0, 0))
-			BUG();
+		blk_end_request_all(rq, 0);
 
 		if (unlikely(mbox_seq_test(mbox, *p))) {
 			pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
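For reference, the driver-facing pattern the mailbox conversion above follows is roughly the one sketched below. This is an illustrative sketch only, not part of the patch: my_tx_work() and my_send() are hypothetical names, and the locking mirrors the mailbox workqueue handler, which takes queue_lock around each block-layer call.

#include <linux/blkdev.h>

/* hypothetical transmit helper; returns non-zero if the hardware is busy */
extern int my_send(void *payload);

static void my_tx_work(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		spin_lock_irq(q->queue_lock);
		rq = blk_fetch_request(q);	/* dequeues and starts the request */
		spin_unlock_irq(q->queue_lock);

		if (!rq)
			break;

		if (my_send(rq->special)) {
			/* could not transmit: put the request back and stop */
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(q, rq);
			spin_unlock_irq(q->queue_lock);
			return;
		}

		spin_lock_irq(q->queue_lock);
		__blk_end_request_all(rq, 0);	/* complete all bytes, drop the request */
		spin_unlock_irq(q->queue_lock);
	}
}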
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f934225fd8e..aa9e926e13d 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
 
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
-
-static void ubd_end_request(struct request *req, int bytes, int error)
-{
-	blk_end_request(req, error, bytes);
-}
-
-/* Callable only from interrupt context - otherwise you need to do
- * spin_lock_irq()/spin_lock_irqsave() */
-static inline void ubd_finish(struct request *req, int bytes)
-{
-	if(bytes < 0){
-		ubd_end_request(req, 0, -EIO);
-		return;
-	}
-	ubd_end_request(req, bytes, 0);
-}
-
 static LIST_HEAD(restart);
 
 /* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
 static void ubd_handler(void)
 {
 	struct io_thread_req *req;
-	struct request *rq;
 	struct ubd *ubd;
 	struct list_head *list, *next_ele;
 	unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
 			return;
 		}
 
-		rq = req->req;
-		rq->nr_sectors -= req->length >> 9;
-		if(rq->nr_sectors == 0)
-			ubd_finish(rq, rq->hard_nr_sectors << 9);
+		blk_end_request(req->req, 0, req->length);
 		kfree(req);
 	}
 	reactivate_fd(thread_fd, UBD_IRQ);
@@ -1243,27 +1222,26 @@ static void do_ubd_request(struct request_queue *q)
 {
 	struct io_thread_req *io_req;
 	struct request *req;
-	int n, last_sectors;
+	sector_t sector;
+	int n;
 
 	while(1){
 		struct ubd *dev = q->queuedata;
 		if(dev->end_sg == 0){
-			struct request *req = elv_next_request(q);
+			struct request *req = blk_fetch_request(q);
 			if(req == NULL)
 				return;
 
 			dev->request = req;
-			blkdev_dequeue_request(req);
 			dev->start_sg = 0;
 			dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
 		}
 
 		req = dev->request;
-		last_sectors = 0;
+		sector = blk_rq_pos(req);
 		while(dev->start_sg < dev->end_sg){
 			struct scatterlist *sg = &dev->sg[dev->start_sg];
 
-			req->sector += last_sectors;
 			io_req = kmalloc(sizeof(struct io_thread_req),
 					 GFP_ATOMIC);
 			if(io_req == NULL){
@@ -1272,10 +1250,10 @@ static void do_ubd_request(struct request_queue *q)
 				return;
 			}
 			prepare_request(req, io_req,
-					(unsigned long long) req->sector << 9,
+					(unsigned long long)sector << 9,
 					sg->offset, sg->length, sg_page(sg));
 
-			last_sectors = sg->length >> 9;
+			sector += sg->length >> 9;
 			n = os_write_file(thread_fd, &io_req,
 					  sizeof(struct io_thread_req *));
 			if(n != sizeof(struct io_thread_req *)){
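The ubd change above illustrates the other half of the conversion: a driver no longer advances rq->sector itself but keeps a private cursor seeded from blk_rq_pos(), and it completes I/O in byte units with blk_end_request(). A minimal sketch under those assumptions; submit_chunk() and the completion hook are made-up helpers, not part of this patch.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* hypothetical helper that queues one chunk of I/O to the backend */
extern void submit_chunk(void *dev, sector_t sector, struct scatterlist *sg);

static void my_issue(struct request_queue *q, struct request *rq,
		     struct scatterlist *sg, int nents)
{
	sector_t sector = blk_rq_pos(rq);	/* private cursor, rq is left untouched */
	int i;

	for (i = 0; i < nents; i++) {
		submit_chunk(q->queuedata, sector, &sg[i]);
		sector += sg[i].length >> 9;
	}
}

/* completion side: finish 'bytes' of rq; returns true while data remains */
static bool my_complete_chunk(struct request *rq, unsigned int bytes, int error)
{
	return blk_end_request(rq, error, bytes);
}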
diff --git a/block/Kconfig b/block/Kconfig
index e7d12782bcf..2c39527aa7d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,6 +26,7 @@ if BLOCK
 config LBD
 	bool "Support for large block devices and files"
 	depends on !64BIT
+	default y
 	help
 	  Enable block devices or files of size 2TB and larger.
 
@@ -38,11 +39,13 @@ config LBD
 
 	  The ext4 filesystem requires that this feature be enabled in
 	  order to support filesystems that have the huge_file feature
-	  enabled. Otherwise, it will refuse to mount any filesystems
-	  that use the huge_file feature, which is enabled by default
-	  by mke2fs.ext4. The GFS2 filesystem also requires this feature.
+	  enabled. Otherwise, it will refuse to mount in the read-write
+	  mode any filesystems that use the huge_file feature, which is
+	  enabled by default by mke2fs.ext4.
 
-	  If unsure, say N.
+	  The GFS2 filesystem also requires this feature.
+
+	  If unsure, say Y.
 
 config BLK_DEV_BSG
 	bool "Block layer SG support v4 (EXPERIMENTAL)"
diff --git a/block/as-iosched.c b/block/as-iosched.c
index c48fa670d22..7a12cf6ee1d 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
 	data_dir = rq_is_sync(rq1);
 
 	last = ad->last_sector[data_dir];
-	s1 = rq1->sector;
-	s2 = rq2->sector;
+	s1 = blk_rq_pos(rq1);
+	s2 = blk_rq_pos(rq2);
 
 	BUG_ON(data_dir != rq_is_sync(rq2));
 
@@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
 			as_update_thinktime(ad, aic, thinktime);
 
 			/* Calculate read -> read seek distance */
-			if (aic->last_request_pos < rq->sector)
-				seek_dist = rq->sector - aic->last_request_pos;
+			if (aic->last_request_pos < blk_rq_pos(rq))
+				seek_dist = blk_rq_pos(rq) -
+						aic->last_request_pos;
 			else
-				seek_dist = aic->last_request_pos - rq->sector;
+				seek_dist = aic->last_request_pos -
+						blk_rq_pos(rq);
 			as_update_seekdist(ad, aic, seek_dist);
 		}
-		aic->last_request_pos = rq->sector + rq->nr_sectors;
+		aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 		set_bit(AS_TASK_IOSTARTED, &aic->state);
 		spin_unlock(&aic->lock);
 	}
@@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
 {
 	unsigned long delay;	/* jiffies */
 	sector_t last = ad->last_sector[ad->batch_data_dir];
-	sector_t next = rq->sector;
+	sector_t next = blk_rq_pos(rq);
 	sector_t delta; /* acceptable close offset (in sectors) */
 	sector_t s;
 
@@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
 	 * This has to be set in order to be correctly updated by
 	 * as_find_next_rq
 	 */
-	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+	ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (data_dir == BLK_RW_SYNC) {
 		struct io_context *ioc = RQ_IOC(rq);
@@ -1312,12 +1314,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 static void as_work_handler(struct work_struct *work)
 {
 	struct as_data *ad = container_of(work, struct as_data, antic_work);
-	struct request_queue *q = ad->q;
-	unsigned long flags;
 
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_start_queueing(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_run_queue(ad->q);
 }
 
 static int as_may_queue(struct request_queue *q, int rw)
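The scheduler hunks above are part of a tree-wide substitution of accessor helpers for the old struct request fields; the mapping used throughout this diff is rq->sector -> blk_rq_pos(), rq->nr_sectors -> blk_rq_sectors(), rq->current_nr_sectors -> blk_rq_cur_sectors() and rq->data_len -> blk_rq_bytes(). A small sketch of the resulting idiom (the function names here are illustrative only):

#include <linux/blkdev.h>

/* absolute seek distance between the last completed position and rq */
static sector_t my_seek_distance(sector_t last_pos, struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);

	return pos > last_pos ? pos - last_pos : last_pos - pos;
}

/* first sector after rq, e.g. for remembering the disk head position */
static sector_t my_end_pos(struct request *rq)
{
	return blk_rq_pos(rq) + blk_rq_sectors(rq);
}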
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 20b4111fa05..0d98054cdbd 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	 */
 	q->ordseq = 0;
 	rq = q->orig_bar_rq;
-
-	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
-		BUG();
-
+	__blk_end_request_all(rq, q->orderr);
 	return true;
 }
 
@@ -166,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	 * For an empty barrier, there's no actual BAR request, which
 	 * in turn makes POSTFLUSH unnecessary. Mask them off.
 	 */
-	if (!rq->hard_nr_sectors) {
+	if (!blk_rq_sectors(rq)) {
 		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
 				QUEUE_ORDERED_DO_POSTFLUSH);
 		/*
@@ -183,7 +180,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	}
 
 	/* stash away the original request */
-	elv_dequeue_request(q, rq);
+	blk_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = NULL;
 
@@ -221,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	} else
 		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
 		rq = NULL;
 	else
 		skip |= QUEUE_ORDSEQ_DRAIN;
@@ -251,10 +248,8 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 			 * Queue ordering not supported. Terminate
 			 * with prejudice.
 			 */
-			elv_dequeue_request(q, rq);
-			if (__blk_end_request(rq, -EOPNOTSUPP,
-					      blk_rq_bytes(rq)))
-				BUG();
+			blk_dequeue_request(rq);
+			__blk_end_request_all(rq, -EOPNOTSUPP);
 			*rqp = NULL;
 			return false;
 		}
@@ -329,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	/*
 	 * The driver must store the error location in ->bi_sector, if
 	 * it supports it. For non-stacked drivers, this should be copied
-	 * from rq->sector.
+	 * from blk_rq_pos(rq).
 	 */
 	if (error_sector)
 		*error_sector = bio->bi_sector;
diff --git a/block/blk-core.c b/block/blk-core.c
index c89883be873..59c4af52311 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -68,11 +68,11 @@ static void drive_stat_acct(struct request *rq, int new_io)
68 int rw = rq_data_dir(rq); 68 int rw = rq_data_dir(rq);
69 int cpu; 69 int cpu;
70 70
71 if (!blk_fs_request(rq) || !blk_do_io_stat(rq)) 71 if (!blk_do_io_stat(rq))
72 return; 72 return;
73 73
74 cpu = part_stat_lock(); 74 cpu = part_stat_lock();
75 part = disk_map_sector_rcu(rq->rq_disk, rq->sector); 75 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
76 76
77 if (!new_io) 77 if (!new_io)
78 part_stat_inc(cpu, part, merges[rw]); 78 part_stat_inc(cpu, part, merges[rw]);
@@ -127,13 +127,14 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
127 INIT_LIST_HEAD(&rq->timeout_list); 127 INIT_LIST_HEAD(&rq->timeout_list);
128 rq->cpu = -1; 128 rq->cpu = -1;
129 rq->q = q; 129 rq->q = q;
130 rq->sector = rq->hard_sector = (sector_t) -1; 130 rq->__sector = (sector_t) -1;
131 INIT_HLIST_NODE(&rq->hash); 131 INIT_HLIST_NODE(&rq->hash);
132 RB_CLEAR_NODE(&rq->rb_node); 132 RB_CLEAR_NODE(&rq->rb_node);
133 rq->cmd = rq->__cmd; 133 rq->cmd = rq->__cmd;
134 rq->cmd_len = BLK_MAX_CDB; 134 rq->cmd_len = BLK_MAX_CDB;
135 rq->tag = -1; 135 rq->tag = -1;
136 rq->ref_count = 1; 136 rq->ref_count = 1;
137 rq->start_time = jiffies;
137} 138}
138EXPORT_SYMBOL(blk_rq_init); 139EXPORT_SYMBOL(blk_rq_init);
139 140
@@ -184,14 +185,11 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
184 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 185 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
185 rq->cmd_flags); 186 rq->cmd_flags);
186 187
187 printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", 188 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
188 (unsigned long long)rq->sector, 189 (unsigned long long)blk_rq_pos(rq),
189 rq->nr_sectors, 190 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
190 rq->current_nr_sectors); 191 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
191 printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", 192 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
192 rq->bio, rq->biotail,
193 rq->buffer, rq->data,
194 rq->data_len);
195 193
196 if (blk_pc_request(rq)) { 194 if (blk_pc_request(rq)) {
197 printk(KERN_INFO " cdb: "); 195 printk(KERN_INFO " cdb: ");
@@ -333,24 +331,6 @@ void blk_unplug(struct request_queue *q)
333} 331}
334EXPORT_SYMBOL(blk_unplug); 332EXPORT_SYMBOL(blk_unplug);
335 333
336static void blk_invoke_request_fn(struct request_queue *q)
337{
338 if (unlikely(blk_queue_stopped(q)))
339 return;
340
341 /*
342 * one level of recursion is ok and is much faster than kicking
343 * the unplug handling
344 */
345 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
346 q->request_fn(q);
347 queue_flag_clear(QUEUE_FLAG_REENTER, q);
348 } else {
349 queue_flag_set(QUEUE_FLAG_PLUGGED, q);
350 kblockd_schedule_work(q, &q->unplug_work);
351 }
352}
353
354/** 334/**
355 * blk_start_queue - restart a previously stopped queue 335 * blk_start_queue - restart a previously stopped queue
356 * @q: The &struct request_queue in question 336 * @q: The &struct request_queue in question
@@ -365,7 +345,7 @@ void blk_start_queue(struct request_queue *q)
365 WARN_ON(!irqs_disabled()); 345 WARN_ON(!irqs_disabled());
366 346
367 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 347 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
368 blk_invoke_request_fn(q); 348 __blk_run_queue(q);
369} 349}
370EXPORT_SYMBOL(blk_start_queue); 350EXPORT_SYMBOL(blk_start_queue);
371 351
@@ -425,12 +405,23 @@ void __blk_run_queue(struct request_queue *q)
425{ 405{
426 blk_remove_plug(q); 406 blk_remove_plug(q);
427 407
408 if (unlikely(blk_queue_stopped(q)))
409 return;
410
411 if (elv_queue_empty(q))
412 return;
413
428 /* 414 /*
429 * Only recurse once to avoid overrunning the stack, let the unplug 415 * Only recurse once to avoid overrunning the stack, let the unplug
430 * handling reinvoke the handler shortly if we already got there. 416 * handling reinvoke the handler shortly if we already got there.
431 */ 417 */
432 if (!elv_queue_empty(q)) 418 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
433 blk_invoke_request_fn(q); 419 q->request_fn(q);
420 queue_flag_clear(QUEUE_FLAG_REENTER, q);
421 } else {
422 queue_flag_set(QUEUE_FLAG_PLUGGED, q);
423 kblockd_schedule_work(q, &q->unplug_work);
424 }
434} 425}
435EXPORT_SYMBOL(__blk_run_queue); 426EXPORT_SYMBOL(__blk_run_queue);
436 427
@@ -440,9 +431,7 @@ EXPORT_SYMBOL(__blk_run_queue);
440 * 431 *
441 * Description: 432 * Description:
442 * Invoke request handling on this queue, if it has pending work to do. 433 * Invoke request handling on this queue, if it has pending work to do.
443 * May be used to restart queueing when a request has completed. Also 434 * May be used to restart queueing when a request has completed.
444 * See @blk_start_queueing.
445 *
446 */ 435 */
447void blk_run_queue(struct request_queue *q) 436void blk_run_queue(struct request_queue *q)
448{ 437{
@@ -902,26 +891,58 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
902EXPORT_SYMBOL(blk_get_request); 891EXPORT_SYMBOL(blk_get_request);
903 892
904/** 893/**
905 * blk_start_queueing - initiate dispatch of requests to device 894 * blk_make_request - given a bio, allocate a corresponding struct request.
906 * @q: request queue to kick into gear 895 *
896 * @bio: The bio describing the memory mappings that will be submitted for IO.
897 * It may be a chained-bio properly constructed by block/bio layer.
898 *
899 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
900 * type commands. Where the struct request needs to be farther initialized by
901 * the caller. It is passed a &struct bio, which describes the memory info of
902 * the I/O transfer.
907 * 903 *
908 * This is basically a helper to remove the need to know whether a queue 904 * The caller of blk_make_request must make sure that bi_io_vec
909 * is plugged or not if someone just wants to initiate dispatch of requests 905 * are set to describe the memory buffers. That bio_data_dir() will return
910 * for this queue. Should be used to start queueing on a device outside 906 * the needed direction of the request. (And all bio's in the passed bio-chain
911 * of ->request_fn() context. Also see @blk_run_queue. 907 * are properly set accordingly)
912 * 908 *
913 * The queue lock must be held with interrupts disabled. 909 * If called under none-sleepable conditions, mapped bio buffers must not
910 * need bouncing, by calling the appropriate masked or flagged allocator,
911 * suitable for the target device. Otherwise the call to blk_queue_bounce will
912 * BUG.
913 *
914 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
915 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
916 * anything but the first bio in the chain. Otherwise you risk waiting for IO
917 * completion of a bio that hasn't been submitted yet, thus resulting in a
918 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
919 * of bio_alloc(), as that avoids the mempool deadlock.
920 * If possible a big IO should be split into smaller parts when allocation
921 * fails. Partial allocation should not be an error, or you risk a live-lock.
914 */ 922 */
915void blk_start_queueing(struct request_queue *q) 923struct request *blk_make_request(struct request_queue *q, struct bio *bio,
924 gfp_t gfp_mask)
916{ 925{
917 if (!blk_queue_plugged(q)) { 926 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
918 if (unlikely(blk_queue_stopped(q))) 927
919 return; 928 if (unlikely(!rq))
920 q->request_fn(q); 929 return ERR_PTR(-ENOMEM);
921 } else 930
922 __generic_unplug_device(q); 931 for_each_bio(bio) {
932 struct bio *bounce_bio = bio;
933 int ret;
934
935 blk_queue_bounce(q, &bounce_bio);
936 ret = blk_rq_append_bio(q, rq, bounce_bio);
937 if (unlikely(ret)) {
938 blk_put_request(rq);
939 return ERR_PTR(ret);
940 }
941 }
942
943 return rq;
923} 944}
924EXPORT_SYMBOL(blk_start_queueing); 945EXPORT_SYMBOL(blk_make_request);
925 946
926/** 947/**
927 * blk_requeue_request - put a request back on queue 948 * blk_requeue_request - put a request back on queue
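A sketch of a blk_make_request() caller, following the kernel-doc above. The bio is assumed to have been built by the caller beforehand (for example with bio_kmalloc() and manually added pages, as that comment recommends), and the CDB contents are placeholders; the helper name is hypothetical.

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/string.h>

static int my_issue_pc_request(struct request_queue *q, struct bio *bio,
			       const unsigned char *cdb, unsigned int cdb_len)
{
	struct request *rq;
	int err;

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* finish initializing the passthrough request, as the caller must */
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);	/* cdb_len must not exceed BLK_MAX_CDB */
	rq->cmd_len = cdb_len;

	err = blk_execute_rq(q, NULL, rq, 0);	/* issue and wait for completion */
	blk_put_request(rq);
	return err;
}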
@@ -935,6 +956,8 @@ EXPORT_SYMBOL(blk_start_queueing);
935 */ 956 */
936void blk_requeue_request(struct request_queue *q, struct request *rq) 957void blk_requeue_request(struct request_queue *q, struct request *rq)
937{ 958{
959 BUG_ON(blk_queued_rq(rq));
960
938 blk_delete_timer(rq); 961 blk_delete_timer(rq);
939 blk_clear_rq_complete(rq); 962 blk_clear_rq_complete(rq);
940 trace_block_rq_requeue(q, rq); 963 trace_block_rq_requeue(q, rq);
@@ -977,7 +1000,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
977 * barrier 1000 * barrier
978 */ 1001 */
979 rq->cmd_type = REQ_TYPE_SPECIAL; 1002 rq->cmd_type = REQ_TYPE_SPECIAL;
980 rq->cmd_flags |= REQ_SOFTBARRIER;
981 1003
982 rq->special = data; 1004 rq->special = data;
983 1005
@@ -991,7 +1013,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
991 1013
992 drive_stat_acct(rq, 1); 1014 drive_stat_acct(rq, 1);
993 __elv_add_request(q, rq, where, 0); 1015 __elv_add_request(q, rq, where, 0);
994 blk_start_queueing(q); 1016 __blk_run_queue(q);
995 spin_unlock_irqrestore(q->queue_lock, flags); 1017 spin_unlock_irqrestore(q->queue_lock, flags);
996} 1018}
997EXPORT_SYMBOL(blk_insert_request); 1019EXPORT_SYMBOL(blk_insert_request);
@@ -1113,16 +1135,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1113 if (bio_failfast_driver(bio)) 1135 if (bio_failfast_driver(bio))
1114 req->cmd_flags |= REQ_FAILFAST_DRIVER; 1136 req->cmd_flags |= REQ_FAILFAST_DRIVER;
1115 1137
1116 /*
1117 * REQ_BARRIER implies no merging, but lets make it explicit
1118 */
1119 if (unlikely(bio_discard(bio))) { 1138 if (unlikely(bio_discard(bio))) {
1120 req->cmd_flags |= REQ_DISCARD; 1139 req->cmd_flags |= REQ_DISCARD;
1121 if (bio_barrier(bio)) 1140 if (bio_barrier(bio))
1122 req->cmd_flags |= REQ_SOFTBARRIER; 1141 req->cmd_flags |= REQ_SOFTBARRIER;
1123 req->q->prepare_discard_fn(req->q, req); 1142 req->q->prepare_discard_fn(req->q, req);
1124 } else if (unlikely(bio_barrier(bio))) 1143 } else if (unlikely(bio_barrier(bio)))
1125 req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); 1144 req->cmd_flags |= REQ_HARDBARRIER;
1126 1145
1127 if (bio_sync(bio)) 1146 if (bio_sync(bio))
1128 req->cmd_flags |= REQ_RW_SYNC; 1147 req->cmd_flags |= REQ_RW_SYNC;
@@ -1132,9 +1151,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1132 req->cmd_flags |= REQ_NOIDLE; 1151 req->cmd_flags |= REQ_NOIDLE;
1133 1152
1134 req->errors = 0; 1153 req->errors = 0;
1135 req->hard_sector = req->sector = bio->bi_sector; 1154 req->__sector = bio->bi_sector;
1136 req->ioprio = bio_prio(bio); 1155 req->ioprio = bio_prio(bio);
1137 req->start_time = jiffies;
1138 blk_rq_bio_prep(req->q, req, bio); 1156 blk_rq_bio_prep(req->q, req, bio);
1139} 1157}
1140 1158
@@ -1150,14 +1168,13 @@ static inline bool queue_should_plug(struct request_queue *q)
1150static int __make_request(struct request_queue *q, struct bio *bio) 1168static int __make_request(struct request_queue *q, struct bio *bio)
1151{ 1169{
1152 struct request *req; 1170 struct request *req;
1153 int el_ret, nr_sectors; 1171 int el_ret;
1172 unsigned int bytes = bio->bi_size;
1154 const unsigned short prio = bio_prio(bio); 1173 const unsigned short prio = bio_prio(bio);
1155 const int sync = bio_sync(bio); 1174 const int sync = bio_sync(bio);
1156 const int unplug = bio_unplug(bio); 1175 const int unplug = bio_unplug(bio);
1157 int rw_flags; 1176 int rw_flags;
1158 1177
1159 nr_sectors = bio_sectors(bio);
1160
1161 /* 1178 /*
1162 * low level driver can indicate that it wants pages above a 1179 * low level driver can indicate that it wants pages above a
1163 * certain limit bounced to low memory (ie for highmem, or even 1180 * certain limit bounced to low memory (ie for highmem, or even
@@ -1182,7 +1199,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1182 1199
1183 req->biotail->bi_next = bio; 1200 req->biotail->bi_next = bio;
1184 req->biotail = bio; 1201 req->biotail = bio;
1185 req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1202 req->__data_len += bytes;
1186 req->ioprio = ioprio_best(req->ioprio, prio); 1203 req->ioprio = ioprio_best(req->ioprio, prio);
1187 if (!blk_rq_cpu_valid(req)) 1204 if (!blk_rq_cpu_valid(req))
1188 req->cpu = bio->bi_comp_cpu; 1205 req->cpu = bio->bi_comp_cpu;
@@ -1208,10 +1225,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1208 * not touch req->buffer either... 1225 * not touch req->buffer either...
1209 */ 1226 */
1210 req->buffer = bio_data(bio); 1227 req->buffer = bio_data(bio);
1211 req->current_nr_sectors = bio_cur_sectors(bio); 1228 req->__sector = bio->bi_sector;
1212 req->hard_cur_sectors = req->current_nr_sectors; 1229 req->__data_len += bytes;
1213 req->sector = req->hard_sector = bio->bi_sector;
1214 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1215 req->ioprio = ioprio_best(req->ioprio, prio); 1230 req->ioprio = ioprio_best(req->ioprio, prio);
1216 if (!blk_rq_cpu_valid(req)) 1231 if (!blk_rq_cpu_valid(req))
1217 req->cpu = bio->bi_comp_cpu; 1232 req->cpu = bio->bi_comp_cpu;
@@ -1593,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
1593 */ 1608 */
1594int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1609int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1595{ 1610{
1596 if (rq->nr_sectors > q->max_sectors || 1611 if (blk_rq_sectors(rq) > q->max_sectors ||
1597 rq->data_len > q->max_hw_sectors << 9) { 1612 blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
1598 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1613 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1599 return -EIO; 1614 return -EIO;
1600 } 1615 }
@@ -1651,40 +1666,15 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1651} 1666}
1652EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 1667EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1653 1668
1654/**
1655 * blkdev_dequeue_request - dequeue request and start timeout timer
1656 * @req: request to dequeue
1657 *
1658 * Dequeue @req and start timeout timer on it. This hands off the
1659 * request to the driver.
1660 *
1661 * Block internal functions which don't want to start timer should
1662 * call elv_dequeue_request().
1663 */
1664void blkdev_dequeue_request(struct request *req)
1665{
1666 elv_dequeue_request(req->q, req);
1667
1668 /*
1669 * We are now handing the request to the hardware, add the
1670 * timeout handler.
1671 */
1672 blk_add_timer(req);
1673}
1674EXPORT_SYMBOL(blkdev_dequeue_request);
1675
1676static void blk_account_io_completion(struct request *req, unsigned int bytes) 1669static void blk_account_io_completion(struct request *req, unsigned int bytes)
1677{ 1670{
1678 if (!blk_do_io_stat(req)) 1671 if (blk_do_io_stat(req)) {
1679 return;
1680
1681 if (blk_fs_request(req)) {
1682 const int rw = rq_data_dir(req); 1672 const int rw = rq_data_dir(req);
1683 struct hd_struct *part; 1673 struct hd_struct *part;
1684 int cpu; 1674 int cpu;
1685 1675
1686 cpu = part_stat_lock(); 1676 cpu = part_stat_lock();
1687 part = disk_map_sector_rcu(req->rq_disk, req->sector); 1677 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
1688 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1678 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1689 part_stat_unlock(); 1679 part_stat_unlock();
1690 } 1680 }
@@ -1692,22 +1682,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
1692 1682
1693static void blk_account_io_done(struct request *req) 1683static void blk_account_io_done(struct request *req)
1694{ 1684{
1695 if (!blk_do_io_stat(req))
1696 return;
1697
1698 /* 1685 /*
1699 * Account IO completion. bar_rq isn't accounted as a normal 1686 * Account IO completion. bar_rq isn't accounted as a normal
1700 * IO on queueing nor completion. Accounting the containing 1687 * IO on queueing nor completion. Accounting the containing
1701 * request is enough. 1688 * request is enough.
1702 */ 1689 */
1703 if (blk_fs_request(req) && req != &req->q->bar_rq) { 1690 if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
1704 unsigned long duration = jiffies - req->start_time; 1691 unsigned long duration = jiffies - req->start_time;
1705 const int rw = rq_data_dir(req); 1692 const int rw = rq_data_dir(req);
1706 struct hd_struct *part; 1693 struct hd_struct *part;
1707 int cpu; 1694 int cpu;
1708 1695
1709 cpu = part_stat_lock(); 1696 cpu = part_stat_lock();
1710 part = disk_map_sector_rcu(req->rq_disk, req->sector); 1697 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
1711 1698
1712 part_stat_inc(cpu, part, ios[rw]); 1699 part_stat_inc(cpu, part, ios[rw]);
1713 part_stat_add(cpu, part, ticks[rw], duration); 1700 part_stat_add(cpu, part, ticks[rw], duration);
@@ -1719,38 +1706,218 @@ static void blk_account_io_done(struct request *req)
1719} 1706}
1720 1707
1721/** 1708/**
1722 * __end_that_request_first - end I/O on a request 1709 * blk_peek_request - peek at the top of a request queue
1723 * @req: the request being processed 1710 * @q: request queue to peek at
1711 *
1712 * Description:
1713 * Return the request at the top of @q. The returned request
1714 * should be started using blk_start_request() before LLD starts
1715 * processing it.
1716 *
1717 * Return:
1718 * Pointer to the request at the top of @q if available. Null
1719 * otherwise.
1720 *
1721 * Context:
1722 * queue_lock must be held.
1723 */
1724struct request *blk_peek_request(struct request_queue *q)
1725{
1726 struct request *rq;
1727 int ret;
1728
1729 while ((rq = __elv_next_request(q)) != NULL) {
1730 if (!(rq->cmd_flags & REQ_STARTED)) {
1731 /*
1732 * This is the first time the device driver
1733 * sees this request (possibly after
1734 * requeueing). Notify IO scheduler.
1735 */
1736 if (blk_sorted_rq(rq))
1737 elv_activate_rq(q, rq);
1738
1739 /*
1740 * just mark as started even if we don't start
1741 * it, a request that has been delayed should
1742 * not be passed by new incoming requests
1743 */
1744 rq->cmd_flags |= REQ_STARTED;
1745 trace_block_rq_issue(q, rq);
1746 }
1747
1748 if (!q->boundary_rq || q->boundary_rq == rq) {
1749 q->end_sector = rq_end_sector(rq);
1750 q->boundary_rq = NULL;
1751 }
1752
1753 if (rq->cmd_flags & REQ_DONTPREP)
1754 break;
1755
1756 if (q->dma_drain_size && blk_rq_bytes(rq)) {
1757 /*
1758 * make sure space for the drain appears we
1759 * know we can do this because max_hw_segments
1760 * has been adjusted to be one fewer than the
1761 * device can handle
1762 */
1763 rq->nr_phys_segments++;
1764 }
1765
1766 if (!q->prep_rq_fn)
1767 break;
1768
1769 ret = q->prep_rq_fn(q, rq);
1770 if (ret == BLKPREP_OK) {
1771 break;
1772 } else if (ret == BLKPREP_DEFER) {
1773 /*
1774 * the request may have been (partially) prepped.
1775 * we need to keep this request in the front to
1776 * avoid resource deadlock. REQ_STARTED will
1777 * prevent other fs requests from passing this one.
1778 */
1779 if (q->dma_drain_size && blk_rq_bytes(rq) &&
1780 !(rq->cmd_flags & REQ_DONTPREP)) {
1781 /*
1782 * remove the space for the drain we added
1783 * so that we don't add it again
1784 */
1785 --rq->nr_phys_segments;
1786 }
1787
1788 rq = NULL;
1789 break;
1790 } else if (ret == BLKPREP_KILL) {
1791 rq->cmd_flags |= REQ_QUIET;
1792 __blk_end_request_all(rq, -EIO);
1793 } else {
1794 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
1795 break;
1796 }
1797 }
1798
1799 return rq;
1800}
1801EXPORT_SYMBOL(blk_peek_request);
1802
1803void blk_dequeue_request(struct request *rq)
1804{
1805 struct request_queue *q = rq->q;
1806
1807 BUG_ON(list_empty(&rq->queuelist));
1808 BUG_ON(ELV_ON_HASH(rq));
1809
1810 list_del_init(&rq->queuelist);
1811
1812 /*
1813 * the time frame between a request being removed from the lists
1814 * and to it is freed is accounted as io that is in progress at
1815 * the driver side.
1816 */
1817 if (blk_account_rq(rq))
1818 q->in_flight[rq_is_sync(rq)]++;
1819}
1820
1821/**
1822 * blk_start_request - start request processing on the driver
1823 * @req: request to dequeue
1824 *
1825 * Description:
1826 * Dequeue @req and start timeout timer on it. This hands off the
1827 * request to the driver.
1828 *
1829 * Block internal functions which don't want to start timer should
1830 * call blk_dequeue_request().
1831 *
1832 * Context:
1833 * queue_lock must be held.
1834 */
1835void blk_start_request(struct request *req)
1836{
1837 blk_dequeue_request(req);
1838
1839 /*
1840 * We are now handing the request to the hardware, initialize
1841 * resid_len to full count and add the timeout handler.
1842 */
1843 req->resid_len = blk_rq_bytes(req);
1844 blk_add_timer(req);
1845}
1846EXPORT_SYMBOL(blk_start_request);
1847
1848/**
1849 * blk_fetch_request - fetch a request from a request queue
1850 * @q: request queue to fetch a request from
1851 *
1852 * Description:
1853 * Return the request at the top of @q. The request is started on
1854 * return and LLD can start processing it immediately.
1855 *
1856 * Return:
1857 * Pointer to the request at the top of @q if available. Null
1858 * otherwise.
1859 *
1860 * Context:
1861 * queue_lock must be held.
1862 */
1863struct request *blk_fetch_request(struct request_queue *q)
1864{
1865 struct request *rq;
1866
1867 rq = blk_peek_request(q);
1868 if (rq)
1869 blk_start_request(rq);
1870 return rq;
1871}
1872EXPORT_SYMBOL(blk_fetch_request);
1873
1874/**
1875 * blk_update_request - Special helper function for request stacking drivers
1876 * @rq: the request being processed
1724 * @error: %0 for success, < %0 for error 1877 * @error: %0 for success, < %0 for error
1725 * @nr_bytes: number of bytes to complete 1878 * @nr_bytes: number of bytes to complete @rq
1726 * 1879 *
1727 * Description: 1880 * Description:
1728 * Ends I/O on a number of bytes attached to @req, and sets it up 1881 * Ends I/O on a number of bytes attached to @rq, but doesn't complete
1729 * for the next range of segments (if any) in the cluster. 1882 * the request structure even if @rq doesn't have leftover.
1883 * If @rq has leftover, sets it up for the next range of segments.
1884 *
1885 * This special helper function is only for request stacking drivers
1886 * (e.g. request-based dm) so that they can handle partial completion.
1887 * Actual device drivers should use blk_end_request instead.
1888 *
1889 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1890 * %false return from this function.
1730 * 1891 *
1731 * Return: 1892 * Return:
1732 * %0 - we are done with this request, call end_that_request_last() 1893 * %false - this request doesn't have any more data
1733 * %1 - still buffers pending for this request 1894 * %true - this request has more data
1734 **/ 1895 **/
1735static int __end_that_request_first(struct request *req, int error, 1896bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
1736 int nr_bytes)
1737{ 1897{
1738 int total_bytes, bio_nbytes, next_idx = 0; 1898 int total_bytes, bio_nbytes, next_idx = 0;
1739 struct bio *bio; 1899 struct bio *bio;
1740 1900
1901 if (!req->bio)
1902 return false;
1903
1741 trace_block_rq_complete(req->q, req); 1904 trace_block_rq_complete(req->q, req);
1742 1905
1743 /* 1906 /*
1744 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual 1907 * For fs requests, rq is just carrier of independent bio's
1745 * sense key with us all the way through 1908 * and each partial completion should be handled separately.
1909 * Reset per-request error on each partial completion.
1910 *
1911 * TODO: tj: This is too subtle. It would be better to let
1912 * low level drivers do what they see fit.
1746 */ 1913 */
1747 if (!blk_pc_request(req)) 1914 if (blk_fs_request(req))
1748 req->errors = 0; 1915 req->errors = 0;
1749 1916
1750 if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { 1917 if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1751 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 1918 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1752 req->rq_disk ? req->rq_disk->disk_name : "?", 1919 req->rq_disk ? req->rq_disk->disk_name : "?",
1753 (unsigned long long)req->sector); 1920 (unsigned long long)blk_rq_pos(req));
1754 } 1921 }
1755 1922
1756 blk_account_io_completion(req, nr_bytes); 1923 blk_account_io_completion(req, nr_bytes);
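blk_peek_request(), blk_start_request() and blk_fetch_request() above split "look at the next request" from "commit to it". A hedged sketch of how a request_fn might use that split when it has to check device resources first; my_dev_has_room() and my_dev_issue() are hypothetical hooks, and drivers that do not need to peek can simply call blk_fetch_request() instead.

#include <linux/blkdev.h>

/* hypothetical device hooks */
extern bool my_dev_has_room(void *dev, struct request *rq);
extern void my_dev_issue(void *dev, struct request *rq);

/* called with queue_lock held, as blk_peek_request() requires */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (!my_dev_has_room(q->queuedata, rq))
			return;		/* leave rq queued; run again later */

		blk_start_request(rq);	/* dequeue, set resid_len, arm the timeout */
		my_dev_issue(q->queuedata, rq);
	}
}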
@@ -1810,8 +1977,15 @@ static int __end_that_request_first(struct request *req, int error,
1810 /* 1977 /*
1811 * completely done 1978 * completely done
1812 */ 1979 */
1813 if (!req->bio) 1980 if (!req->bio) {
1814 return 0; 1981 /*
1982 * Reset counters so that the request stacking driver
1983 * can find how many bytes remain in the request
1984 * later.
1985 */
1986 req->__data_len = 0;
1987 return false;
1988 }
1815 1989
1816 /* 1990 /*
1817 * if the request wasn't completed, update state 1991 * if the request wasn't completed, update state
@@ -1823,22 +1997,56 @@ static int __end_that_request_first(struct request *req, int error,
1823 bio_iovec(bio)->bv_len -= nr_bytes; 1997 bio_iovec(bio)->bv_len -= nr_bytes;
1824 } 1998 }
1825 1999
1826 blk_recalc_rq_sectors(req, total_bytes >> 9); 2000 req->__data_len -= total_bytes;
2001 req->buffer = bio_data(req->bio);
2002
2003 /* update sector only for requests with clear definition of sector */
2004 if (blk_fs_request(req) || blk_discard_rq(req))
2005 req->__sector += total_bytes >> 9;
2006
2007 /*
2008 * If total number of sectors is less than the first segment
2009 * size, something has gone terribly wrong.
2010 */
2011 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2012 printk(KERN_ERR "blk: request botched\n");
2013 req->__data_len = blk_rq_cur_bytes(req);
2014 }
2015
2016 /* recalculate the number of segments */
1827 blk_recalc_rq_segments(req); 2017 blk_recalc_rq_segments(req);
1828 return 1; 2018
2019 return true;
2020}
2021EXPORT_SYMBOL_GPL(blk_update_request);
2022
2023static bool blk_update_bidi_request(struct request *rq, int error,
2024 unsigned int nr_bytes,
2025 unsigned int bidi_bytes)
2026{
2027 if (blk_update_request(rq, error, nr_bytes))
2028 return true;
2029
2030 /* Bidi request must be completed as a whole */
2031 if (unlikely(blk_bidi_rq(rq)) &&
2032 blk_update_request(rq->next_rq, error, bidi_bytes))
2033 return true;
2034
2035 add_disk_randomness(rq->rq_disk);
2036
2037 return false;
1829} 2038}
1830 2039
1831/* 2040/*
1832 * queue lock must be held 2041 * queue lock must be held
1833 */ 2042 */
1834static void end_that_request_last(struct request *req, int error) 2043static void blk_finish_request(struct request *req, int error)
1835{ 2044{
2045 BUG_ON(blk_queued_rq(req));
2046
1836 if (blk_rq_tagged(req)) 2047 if (blk_rq_tagged(req))
1837 blk_queue_end_tag(req->q, req); 2048 blk_queue_end_tag(req->q, req);
1838 2049
1839 if (blk_queued_rq(req))
1840 elv_dequeue_request(req->q, req);
1841
1842 if (unlikely(laptop_mode) && blk_fs_request(req)) 2050 if (unlikely(laptop_mode) && blk_fs_request(req))
1843 laptop_io_completion(); 2051 laptop_io_completion();
1844 2052
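blk_update_request() and blk_finish_request() above become the single completion path; the driver-facing wrappers rebuilt further down (blk_end_request(), blk_end_request_all(), blk_end_request_cur() and their __-prefixed, queue-lock-held variants) are thin layers over them. A sketch of typical driver usage, with hypothetical function names and completion parameters:

#include <linux/blkdev.h>

/* partial completion from an interrupt handler: 'done' bytes finished */
static void my_complete_some(struct request *rq, unsigned int done, int error)
{
	if (blk_end_request(rq, error, done))
		return;		/* buffers still pending; rq stays alive */
	/* otherwise rq has been finished and released by the block layer */
}

/* abort or finish a request outright */
static void my_complete_all(struct request *rq, int error)
{
	blk_end_request_all(rq, error);
}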
@@ -1857,117 +2065,62 @@ static void end_that_request_last(struct request *req, int error)
1857} 2065}
1858 2066
1859/** 2067/**
1860 * blk_rq_bytes - Returns bytes left to complete in the entire request 2068 * blk_end_bidi_request - Complete a bidi request
1861 * @rq: the request being processed 2069 * @rq: the request to complete
1862 **/ 2070 * @error: %0 for success, < %0 for error
1863unsigned int blk_rq_bytes(struct request *rq) 2071 * @nr_bytes: number of bytes to complete @rq
1864{ 2072 * @bidi_bytes: number of bytes to complete @rq->next_rq
1865 if (blk_fs_request(rq))
1866 return rq->hard_nr_sectors << 9;
1867
1868 return rq->data_len;
1869}
1870EXPORT_SYMBOL_GPL(blk_rq_bytes);
1871
1872/**
1873 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1874 * @rq: the request being processed
1875 **/
1876unsigned int blk_rq_cur_bytes(struct request *rq)
1877{
1878 if (blk_fs_request(rq))
1879 return rq->current_nr_sectors << 9;
1880
1881 if (rq->bio)
1882 return rq->bio->bi_size;
1883
1884 return rq->data_len;
1885}
1886EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1887
1888/**
1889 * end_request - end I/O on the current segment of the request
1890 * @req: the request being processed
1891 * @uptodate: error value or %0/%1 uptodate flag
1892 * 2073 *
1893 * Description: 2074 * Description:
1894 * Ends I/O on the current segment of a request. If that is the only 2075 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1895 * remaining segment, the request is also completed and freed. 2076 * Drivers that supports bidi can safely call this member for any
1896 * 2077 * type of request, bidi or uni. In the later case @bidi_bytes is
1897 * This is a remnant of how older block drivers handled I/O completions. 2078 * just ignored.
1898 * Modern drivers typically end I/O on the full request in one go, unless 2079 *
1899 * they have a residual value to account for. For that case this function 2080 * Return:
1900 * isn't really useful, unless the residual just happens to be the 2081 * %false - we are done with this request
1901 * full current segment. In other words, don't use this function in new 2082 * %true - still buffers pending for this request
1902 * code. Use blk_end_request() or __blk_end_request() to end a request.
1903 **/ 2083 **/
1904void end_request(struct request *req, int uptodate) 2084static bool blk_end_bidi_request(struct request *rq, int error,
1905{
1906 int error = 0;
1907
1908 if (uptodate <= 0)
1909 error = uptodate ? uptodate : -EIO;
1910
1911 __blk_end_request(req, error, req->hard_cur_sectors << 9);
1912}
1913EXPORT_SYMBOL(end_request);
1914
1915static int end_that_request_data(struct request *rq, int error,
1916 unsigned int nr_bytes, unsigned int bidi_bytes) 2085 unsigned int nr_bytes, unsigned int bidi_bytes)
1917{ 2086{
1918 if (rq->bio) { 2087 struct request_queue *q = rq->q;
1919 if (__end_that_request_first(rq, error, nr_bytes)) 2088 unsigned long flags;
1920 return 1;
1921 2089
1922 /* Bidi request must be completed as a whole */ 2090 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
1923 if (blk_bidi_rq(rq) && 2091 return true;
1924 __end_that_request_first(rq->next_rq, error, bidi_bytes))
1925 return 1;
1926 }
1927 2092
1928 return 0; 2093 spin_lock_irqsave(q->queue_lock, flags);
2094 blk_finish_request(rq, error);
2095 spin_unlock_irqrestore(q->queue_lock, flags);
2096
2097 return false;
1929} 2098}
1930 2099
1931/** 2100/**
1932 * blk_end_io - Generic end_io function to complete a request. 2101 * __blk_end_bidi_request - Complete a bidi request with queue lock held
1933 * @rq: the request being processed 2102 * @rq: the request to complete
1934 * @error: %0 for success, < %0 for error 2103 * @error: %0 for success, < %0 for error
1935 * @nr_bytes: number of bytes to complete @rq 2104 * @nr_bytes: number of bytes to complete @rq
1936 * @bidi_bytes: number of bytes to complete @rq->next_rq 2105 * @bidi_bytes: number of bytes to complete @rq->next_rq
1937 * @drv_callback: function called between completion of bios in the request
1938 * and completion of the request.
1939 * If the callback returns non %0, this helper returns without
1940 * completion of the request.
1941 * 2106 *
1942 * Description: 2107 * Description:
1943 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2108 * Identical to blk_end_bidi_request() except that queue lock is
1944 * If @rq has leftover, sets it up for the next range of segments. 2109 * assumed to be locked on entry and remains so on return.
1945 * 2110 *
1946 * Return: 2111 * Return:
1947 * %0 - we are done with this request 2112 * %false - we are done with this request
1948 * %1 - this request is not freed yet, it still has pending buffers. 2113 * %true - still buffers pending for this request
1949 **/ 2114 **/
1950static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, 2115static bool __blk_end_bidi_request(struct request *rq, int error,
1951 unsigned int bidi_bytes, 2116 unsigned int nr_bytes, unsigned int bidi_bytes)
1952 int (drv_callback)(struct request *))
1953{ 2117{
1954 struct request_queue *q = rq->q; 2118 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
1955 unsigned long flags = 0UL; 2119 return true;
1956
1957 if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
1958 return 1;
1959
1960 /* Special feature for tricky drivers */
1961 if (drv_callback && drv_callback(rq))
1962 return 1;
1963
1964 add_disk_randomness(rq->rq_disk);
1965 2120
1966 spin_lock_irqsave(q->queue_lock, flags); 2121 blk_finish_request(rq, error);
1967 end_that_request_last(rq, error);
1968 spin_unlock_irqrestore(q->queue_lock, flags);
1969 2122
1970 return 0; 2123 return false;
1971} 2124}
1972 2125
1973/** 2126/**
@@ -1981,124 +2134,112 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1981 * If @rq has leftover, sets it up for the next range of segments. 2134 * If @rq has leftover, sets it up for the next range of segments.
1982 * 2135 *
1983 * Return: 2136 * Return:
1984 * %0 - we are done with this request 2137 * %false - we are done with this request
1985 * %1 - still buffers pending for this request 2138 * %true - still buffers pending for this request
1986 **/ 2139 **/
1987int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2140bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1988{ 2141{
1989 return blk_end_io(rq, error, nr_bytes, 0, NULL); 2142 return blk_end_bidi_request(rq, error, nr_bytes, 0);
1990} 2143}
1991EXPORT_SYMBOL_GPL(blk_end_request); 2144EXPORT_SYMBOL_GPL(blk_end_request);
1992 2145
1993/** 2146/**
1994 * __blk_end_request - Helper function for drivers to complete the request. 2147 * blk_end_request_all - Helper function for drives to finish the request.
1995 * @rq: the request being processed 2148 * @rq: the request to finish
1996 * @error: %0 for success, < %0 for error 2149 * @err: %0 for success, < %0 for error
1997 * @nr_bytes: number of bytes to complete
1998 * 2150 *
1999 * Description: 2151 * Description:
2000 * Must be called with queue lock held unlike blk_end_request(). 2152 * Completely finish @rq.
2001 * 2153 */
2002 * Return: 2154void blk_end_request_all(struct request *rq, int error)
2003 * %0 - we are done with this request
2004 * %1 - still buffers pending for this request
2005 **/
2006int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2007{ 2155{
2008 if (rq->bio && __end_that_request_first(rq, error, nr_bytes)) 2156 bool pending;
2009 return 1; 2157 unsigned int bidi_bytes = 0;
2010 2158
2011 add_disk_randomness(rq->rq_disk); 2159 if (unlikely(blk_bidi_rq(rq)))
2160 bidi_bytes = blk_rq_bytes(rq->next_rq);
2012 2161
2013 end_that_request_last(rq, error); 2162 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2163 BUG_ON(pending);
2164}
2165EXPORT_SYMBOL_GPL(blk_end_request_all);
2014 2166
2015 return 0; 2167/**
2168 * blk_end_request_cur - Helper function to finish the current request chunk.
2169 * @rq: the request to finish the current chunk for
2170 * @err: %0 for success, < %0 for error
2171 *
2172 * Description:
2173 * Complete the current consecutively mapped chunk from @rq.
2174 *
2175 * Return:
2176 * %false - we are done with this request
2177 * %true - still buffers pending for this request
2178 */
2179bool blk_end_request_cur(struct request *rq, int error)
2180{
2181 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2016} 2182}
2017EXPORT_SYMBOL_GPL(__blk_end_request); 2183EXPORT_SYMBOL_GPL(blk_end_request_cur);
2018 2184
2019/** 2185/**
2020 * blk_end_bidi_request - Helper function for drivers to complete bidi request. 2186 * __blk_end_request - Helper function for drivers to complete the request.
2021 * @rq: the bidi request being processed 2187 * @rq: the request being processed
2022 * @error: %0 for success, < %0 for error 2188 * @error: %0 for success, < %0 for error
2023 * @nr_bytes: number of bytes to complete @rq 2189 * @nr_bytes: number of bytes to complete
2024 * @bidi_bytes: number of bytes to complete @rq->next_rq
2025 * 2190 *
2026 * Description: 2191 * Description:
2027 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2192 * Must be called with queue lock held unlike blk_end_request().
2028 * 2193 *
2029 * Return: 2194 * Return:
2030 * %0 - we are done with this request 2195 * %false - we are done with this request
2031 * %1 - still buffers pending for this request 2196 * %true - still buffers pending for this request
2032 **/ 2197 **/
2033int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, 2198bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2034 unsigned int bidi_bytes)
2035{ 2199{
2036 return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); 2200 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2037} 2201}
2038EXPORT_SYMBOL_GPL(blk_end_bidi_request); 2202EXPORT_SYMBOL_GPL(__blk_end_request);
2039 2203
2040/** 2204/**
2041 * blk_update_request - Special helper function for request stacking drivers 2205 * __blk_end_request_all - Helper function for drivers to finish the request.
2042 * @rq: the request being processed 2206 * @rq: the request to finish
2043 * @error: %0 for success, < %0 for error 2207 * @error: %0 for success, < %0 for error
2044 * @nr_bytes: number of bytes to complete @rq
2045 * 2208 *
2046 * Description: 2209 * Description:
2047 * Ends I/O on a number of bytes attached to @rq, but doesn't complete 2210 * Completely finish @rq. Must be called with queue lock held.
2048 * the request structure even if @rq doesn't have leftover.
2049 * If @rq has leftover, sets it up for the next range of segments.
2050 *
2051 * This special helper function is only for request stacking drivers
2052 * (e.g. request-based dm) so that they can handle partial completion.
2053 * Actual device drivers should use blk_end_request instead.
2054 */ 2211 */
2055void blk_update_request(struct request *rq, int error, unsigned int nr_bytes) 2212void __blk_end_request_all(struct request *rq, int error)
2056{ 2213{
2057 if (!end_that_request_data(rq, error, nr_bytes, 0)) { 2214 bool pending;
2058 /* 2215 unsigned int bidi_bytes = 0;
2059 * These members are not updated in end_that_request_data() 2216
2060 * when all bios are completed. 2217 if (unlikely(blk_bidi_rq(rq)))
2061 * Update them so that the request stacking driver can find 2218 bidi_bytes = blk_rq_bytes(rq->next_rq);
2062 * how many bytes remain in the request later. 2219
2063 */ 2220 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2064 rq->nr_sectors = rq->hard_nr_sectors = 0; 2221 BUG_ON(pending);
2065 rq->current_nr_sectors = rq->hard_cur_sectors = 0;
2066 }
2067} 2222}
2068EXPORT_SYMBOL_GPL(blk_update_request); 2223EXPORT_SYMBOL_GPL(__blk_end_request_all);
2069 2224
2070/** 2225/**
2071 * blk_end_request_callback - Special helper function for tricky drivers 2226 * __blk_end_request_cur - Helper function to finish the current request chunk.
2072 * @rq: the request being processed 2227 * @rq: the request to finish the current chunk for
2073 * @error: %0 for success, < %0 for error 2228 * @error: %0 for success, < %0 for error
2074 * @nr_bytes: number of bytes to complete
2075 * @drv_callback: function called between completion of bios in the request
2076 * and completion of the request.
2077 * If the callback returns non %0, this helper returns without
2078 * completion of the request.
2079 * 2229 *
2080 * Description: 2230 * Description:
2081 * Ends I/O on a number of bytes attached to @rq. 2231 * Complete the current consecutively mapped chunk from @rq. Must
2082 * If @rq has leftover, sets it up for the next range of segments. 2232 * be called with queue lock held.
2083 *
2084 * This special helper function is used only for existing tricky drivers.
2085 * (e.g. cdrom_newpc_intr() of ide-cd)
2086 * This interface will be removed when such drivers are rewritten.
2087 * Don't use this interface in other places anymore.
2088 * 2233 *
2089 * Return: 2234 * Return:
2090 * %0 - we are done with this request 2235 * %false - we are done with this request
2091 * %1 - this request is not freed yet. 2236 * %true - still buffers pending for this request
2092 * this request still has pending buffers or 2237 */
2093 * the driver doesn't want to finish this request yet. 2238bool __blk_end_request_cur(struct request *rq, int error)
2094 **/
2095int blk_end_request_callback(struct request *rq, int error,
2096 unsigned int nr_bytes,
2097 int (drv_callback)(struct request *))
2098{ 2239{
2099 return blk_end_io(rq, error, nr_bytes, 0, drv_callback); 2240 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2100} 2241}
2101EXPORT_SYMBOL_GPL(blk_end_request_callback); 2242EXPORT_SYMBOL_GPL(__blk_end_request_cur);
2102 2243
2103void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2244void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2104 struct bio *bio) 2245 struct bio *bio)
@@ -2111,11 +2252,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2111 rq->nr_phys_segments = bio_phys_segments(q, bio); 2252 rq->nr_phys_segments = bio_phys_segments(q, bio);
2112 rq->buffer = bio_data(bio); 2253 rq->buffer = bio_data(bio);
2113 } 2254 }
2114 rq->current_nr_sectors = bio_cur_sectors(bio); 2255 rq->__data_len = bio->bi_size;
2115 rq->hard_cur_sectors = rq->current_nr_sectors;
2116 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2117 rq->data_len = bio->bi_size;
2118
2119 rq->bio = rq->biotail = bio; 2256 rq->bio = rq->biotail = bio;
2120 2257
2121 if (bio->bi_bdev) 2258 if (bio->bi_bdev)
@@ -2158,6 +2295,9 @@ EXPORT_SYMBOL(kblockd_schedule_work);
2158 2295
2159int __init blk_dev_init(void) 2296int __init blk_dev_init(void)
2160{ 2297{
2298 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2299 sizeof(((struct request *)0)->cmd_flags));
2300
2161 kblockd_workqueue = create_workqueue("kblockd"); 2301 kblockd_workqueue = create_workqueue("kblockd");
2162 if (!kblockd_workqueue) 2302 if (!kblockd_workqueue)
2163 panic("Failed to create kblockd\n"); 2303 panic("Failed to create kblockd\n");
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 6af716d1e54..49557e91f0d 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
51 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 51 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
52 52
53 rq->rq_disk = bd_disk; 53 rq->rq_disk = bd_disk;
54 rq->cmd_flags |= REQ_NOMERGE;
55 rq->end_io = done; 54 rq->end_io = done;
56 WARN_ON(irqs_disabled()); 55 WARN_ON(irqs_disabled());
57 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
diff --git a/block/blk-map.c b/block/blk-map.c
index f103729b462..ef2492adca7 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
20 rq->biotail->bi_next = bio; 20 rq->biotail->bi_next = bio;
21 rq->biotail = bio; 21 rq->biotail = bio;
22 22
23 rq->data_len += bio->bi_size; 23 rq->__data_len += bio->bi_size;
24 } 24 }
25 return 0; 25 return 0;
26} 26}
27EXPORT_SYMBOL(blk_rq_append_bio);
28 27
29static int __blk_rq_unmap_user(struct bio *bio) 28static int __blk_rq_unmap_user(struct bio *bio)
30{ 29{
@@ -156,7 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
156 if (!bio_flagged(bio, BIO_USER_MAPPED)) 155 if (!bio_flagged(bio, BIO_USER_MAPPED))
157 rq->cmd_flags |= REQ_COPY_USER; 156 rq->cmd_flags |= REQ_COPY_USER;
158 157
159 rq->buffer = rq->data = NULL; 158 rq->buffer = NULL;
160 return 0; 159 return 0;
161unmap_rq: 160unmap_rq:
162 blk_rq_unmap_user(bio); 161 blk_rq_unmap_user(bio);
@@ -235,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
235 blk_queue_bounce(q, &bio); 234 blk_queue_bounce(q, &bio);
236 bio_get(bio); 235 bio_get(bio);
237 blk_rq_bio_prep(q, rq, bio); 236 blk_rq_bio_prep(q, rq, bio);
238 rq->buffer = rq->data = NULL; 237 rq->buffer = NULL;
239 return 0; 238 return 0;
240} 239}
241EXPORT_SYMBOL(blk_rq_map_user_iov); 240EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -282,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
282 * 281 *
283 * Description: 282 * Description:
284 * Data will be mapped directly if possible. Otherwise a bounce 283 * Data will be mapped directly if possible. Otherwise a bounce
285 * buffer is used. 284 * buffer is used. Can be called multiple times to append multiple
285 * buffers.
286 */ 286 */
287int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, 287int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
288 unsigned int len, gfp_t gfp_mask) 288 unsigned int len, gfp_t gfp_mask)
@@ -290,6 +290,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
290 int reading = rq_data_dir(rq) == READ; 290 int reading = rq_data_dir(rq) == READ;
291 int do_copy = 0; 291 int do_copy = 0;
292 struct bio *bio; 292 struct bio *bio;
293 int ret;
293 294
294 if (len > (q->max_hw_sectors << 9)) 295 if (len > (q->max_hw_sectors << 9))
295 return -EINVAL; 296 return -EINVAL;
@@ -311,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
311 if (do_copy) 312 if (do_copy)
312 rq->cmd_flags |= REQ_COPY_USER; 313 rq->cmd_flags |= REQ_COPY_USER;
313 314
314 blk_rq_bio_prep(q, rq, bio); 315 ret = blk_rq_append_bio(q, rq, bio);
316 if (unlikely(ret)) {
317 /* request is too big */
318 bio_put(bio);
319 return ret;
320 }
321
315 blk_queue_bounce(q, &rq->bio); 322 blk_queue_bounce(q, &rq->bio);
316 rq->buffer = rq->data = NULL; 323 rq->buffer = NULL;
317 return 0; 324 return 0;
318} 325}
319EXPORT_SYMBOL(blk_rq_map_kern); 326EXPORT_SYMBOL(blk_rq_map_kern);
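The reworded kernel-doc above ("can be called multiple times to append multiple buffers") suggests an idiom along these lines; a hedged sketch only, with q, disk, hdr/hdr_len and data/data_len as illustrative names and the command bytes omitted.

#include <linux/blkdev.h>

/* Hedged sketch, not from this patch; rq->cmd[] setup is omitted. */
static int my_send_two_buffers(struct request_queue *q, struct gendisk *disk,
			       void *hdr, unsigned int hdr_len,
			       void *data, unsigned int data_len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_KERNEL);
	if (!ret)	/* second call appends another bio to the same rq */
		ret = blk_rq_map_kern(q, rq, data, data_len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	blk_execute_rq(q, disk, rq, 0);
	ret = rq->errors ? -EIO : 0;
	blk_put_request(rq);
	return ret;
}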
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 23d2a6fe34a..4974dd5767e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,35 +9,6 @@
9 9
10#include "blk.h" 10#include "blk.h"
11 11
12void blk_recalc_rq_sectors(struct request *rq, int nsect)
13{
14 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
15 rq->hard_sector += nsect;
16 rq->hard_nr_sectors -= nsect;
17
18 /*
19 * Move the I/O submission pointers ahead if required.
20 */
21 if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
22 (rq->sector <= rq->hard_sector)) {
23 rq->sector = rq->hard_sector;
24 rq->nr_sectors = rq->hard_nr_sectors;
25 rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
26 rq->current_nr_sectors = rq->hard_cur_sectors;
27 rq->buffer = bio_data(rq->bio);
28 }
29
30 /*
31 * if total number of sectors is less than the first segment
32 * size, something has gone terribly wrong
33 */
34 if (rq->nr_sectors < rq->current_nr_sectors) {
35 printk(KERN_ERR "blk: request botched\n");
36 rq->nr_sectors = rq->current_nr_sectors;
37 }
38 }
39}
40
41static unsigned int __blk_recalc_rq_segments(struct request_queue *q, 12static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
42 struct bio *bio) 13 struct bio *bio)
43{ 14{
@@ -199,8 +170,9 @@ new_segment:
199 170
200 171
201 if (unlikely(rq->cmd_flags & REQ_COPY_USER) && 172 if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
202 (rq->data_len & q->dma_pad_mask)) { 173 (blk_rq_bytes(rq) & q->dma_pad_mask)) {
203 unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1; 174 unsigned int pad_len =
175 (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
204 176
205 sg->length += pad_len; 177 sg->length += pad_len;
206 rq->extra_len += pad_len; 178 rq->extra_len += pad_len;
@@ -259,7 +231,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
259 else 231 else
260 max_sectors = q->max_sectors; 232 max_sectors = q->max_sectors;
261 233
262 if (req->nr_sectors + bio_sectors(bio) > max_sectors) { 234 if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
263 req->cmd_flags |= REQ_NOMERGE; 235 req->cmd_flags |= REQ_NOMERGE;
264 if (req == q->last_merge) 236 if (req == q->last_merge)
265 q->last_merge = NULL; 237 q->last_merge = NULL;
@@ -284,7 +256,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
284 max_sectors = q->max_sectors; 256 max_sectors = q->max_sectors;
285 257
286 258
287 if (req->nr_sectors + bio_sectors(bio) > max_sectors) { 259 if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
288 req->cmd_flags |= REQ_NOMERGE; 260 req->cmd_flags |= REQ_NOMERGE;
289 if (req == q->last_merge) 261 if (req == q->last_merge)
290 q->last_merge = NULL; 262 q->last_merge = NULL;
@@ -315,7 +287,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
315 /* 287 /*
316 * Will it become too large? 288 * Will it become too large?
317 */ 289 */
318 if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) 290 if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
319 return 0; 291 return 0;
320 292
321 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; 293 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -345,7 +317,7 @@ static void blk_account_io_merge(struct request *req)
345 int cpu; 317 int cpu;
346 318
347 cpu = part_stat_lock(); 319 cpu = part_stat_lock();
348 part = disk_map_sector_rcu(req->rq_disk, req->sector); 320 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
349 321
350 part_round_stats(cpu, part); 322 part_round_stats(cpu, part);
351 part_dec_in_flight(part); 323 part_dec_in_flight(part);
@@ -366,7 +338,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
366 /* 338 /*
367 * not contiguous 339 * not contiguous
368 */ 340 */
369 if (req->sector + req->nr_sectors != next->sector) 341 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
370 return 0; 342 return 0;
371 343
372 if (rq_data_dir(req) != rq_data_dir(next) 344 if (rq_data_dir(req) != rq_data_dir(next)
@@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
398 req->biotail->bi_next = next->bio; 370 req->biotail->bi_next = next->bio;
399 req->biotail = next->biotail; 371 req->biotail = next->biotail;
400 372
401 req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; 373 req->__data_len += blk_rq_bytes(next);
402 374
403 elv_merge_requests(q, req, next); 375 elv_merge_requests(q, req, next);
404 376
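All of the blk-merge.c conversions above follow one pattern: raw ->sector, ->nr_sectors and ->data_len reads become blk_rq_pos(), blk_rq_sectors() and blk_rq_bytes(). A hedged restatement of the idiom (my_back_mergeable() is an illustrative helper, not a kernel function):

#include <linux/blkdev.h>

/* Hedged sketch of the accessor idiom only, not from this patch. */
static bool my_back_mergeable(struct request *req, struct bio *bio,
			      unsigned int max_sectors)
{
	/* previously: req->sector + req->nr_sectors == bio->bi_sector */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != bio->bi_sector)
		return false;
	/* previously: req->nr_sectors + bio_sectors(bio) <= max_sectors */
	return blk_rq_sectors(req) + bio_sectors(bio) <= max_sectors;
}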
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3c518e3303a..2e5cfeb5933 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
336int blk_queue_start_tag(struct request_queue *q, struct request *rq) 336int blk_queue_start_tag(struct request_queue *q, struct request *rq)
337{ 337{
338 struct blk_queue_tag *bqt = q->queue_tags; 338 struct blk_queue_tag *bqt = q->queue_tags;
339 unsigned max_depth, offset; 339 unsigned max_depth;
340 int tag; 340 int tag;
341 341
342 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { 342 if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
355 * to starve sync IO on behalf of flooding async IO. 355 * to starve sync IO on behalf of flooding async IO.
356 */ 356 */
357 max_depth = bqt->max_depth; 357 max_depth = bqt->max_depth;
358 if (rq_is_sync(rq)) 358 if (!rq_is_sync(rq) && max_depth > 1) {
359 offset = 0; 359 max_depth -= 2;
360 else 360 if (!max_depth)
361 offset = max_depth >> 2; 361 max_depth = 1;
362 if (q->in_flight[0] > max_depth)
363 return 1;
364 }
362 365
363 do { 366 do {
364 tag = find_next_zero_bit(bqt->tag_map, max_depth, offset); 367 tag = find_first_zero_bit(bqt->tag_map, max_depth);
365 if (tag >= max_depth) 368 if (tag >= max_depth)
366 return 1; 369 return 1;
367 370
@@ -374,7 +377,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
374 rq->cmd_flags |= REQ_QUEUED; 377 rq->cmd_flags |= REQ_QUEUED;
375 rq->tag = tag; 378 rq->tag = tag;
376 bqt->tag_index[tag] = rq; 379 bqt->tag_index[tag] = rq;
377 blkdev_dequeue_request(rq); 380 blk_start_request(rq);
378 list_add(&rq->queuelist, &q->tag_busy_list); 381 list_add(&rq->queuelist, &q->tag_busy_list);
379 return 0; 382 return 0;
380} 383}
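A hedged restatement of the throttling rule blk_queue_start_tag() now applies to async requests (async_may_start(), bqt_depth and async_in_flight are illustrative names, not kernel symbols): two tags are held back for sync IO, async dispatch is refused once the async in-flight count exceeds the clamped depth, and the tag search itself is limited to that clamped depth.

/* Hedged sketch of the async clamp, not from this patch. */
static bool async_may_start(unsigned int bqt_depth,
			    unsigned int async_in_flight)
{
	unsigned int max_depth = bqt_depth;

	if (max_depth > 1) {
		max_depth -= 2;			/* reserve two tags for sync IO */
		if (!max_depth)
			max_depth = 1;
		if (async_in_flight > max_depth)
			return false;		/* defer this async request */
	}
	/* the real function then searches the tag map only up to max_depth */
	return true;
}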
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ec0d503cac..1ba7e0aca87 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -122,10 +122,8 @@ void blk_rq_timed_out_timer(unsigned long data)
122 if (blk_mark_rq_complete(rq)) 122 if (blk_mark_rq_complete(rq))
123 continue; 123 continue;
124 blk_rq_timed_out(rq); 124 blk_rq_timed_out(rq);
125 } else { 125 } else if (!next || time_after(next, rq->deadline))
126 if (!next || time_after(next, rq->deadline)) 126 next = rq->deadline;
127 next = rq->deadline;
128 }
129 } 127 }
130 128
131 /* 129 /*
@@ -176,16 +174,14 @@ void blk_add_timer(struct request *req)
176 BUG_ON(!list_empty(&req->timeout_list)); 174 BUG_ON(!list_empty(&req->timeout_list));
177 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); 175 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
178 176
179 if (req->timeout) 177 /*
180 req->deadline = jiffies + req->timeout; 178 * Some LLDs, like scsi, peek at the timeout to prevent a
181 else { 179 * command from being retried forever.
182 req->deadline = jiffies + q->rq_timeout; 180 */
183 /* 181 if (!req->timeout)
184 * Some LLDs, like scsi, peek at the timeout to prevent
185 * a command from being retried forever.
186 */
187 req->timeout = q->rq_timeout; 182 req->timeout = q->rq_timeout;
188 } 183
184 req->deadline = jiffies + req->timeout;
189 list_add_tail(&req->timeout_list, &q->timeout_list); 185 list_add_tail(&req->timeout_list, &q->timeout_list);
190 186
191 /* 187 /*
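With the simplified blk_add_timer() above, a driver can give an individual request a longer deadline just by filling rq->timeout before the request is started; a zero value falls back to q->rq_timeout, which is written back so LLDs that peek at rq->timeout see the effective value. A hedged sketch (MY_LONG_TIMEOUT and my_done are illustrative):

#include <linux/blkdev.h>

#define MY_LONG_TIMEOUT	(60 * HZ)	/* illustrative value */

/* Hedged sketch, not from this patch. */
static void my_issue_slow_cmd(struct request_queue *q, struct request *rq,
			      rq_end_io_fn *my_done)
{
	rq->timeout = MY_LONG_TIMEOUT;	/* blk_add_timer() will use this as-is */
	blk_execute_rq_nowait(q, rq->rq_disk, rq, 0, my_done);
}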
diff --git a/block/blk.h b/block/blk.h
index 79c85f7c9ff..c863ec2281e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -13,6 +13,9 @@ extern struct kobj_type blk_queue_ktype;
13void init_request_from_bio(struct request *req, struct bio *bio); 13void init_request_from_bio(struct request *req, struct bio *bio);
14void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 14void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
15 struct bio *bio); 15 struct bio *bio);
16int blk_rq_append_bio(struct request_queue *q, struct request *rq,
17 struct bio *bio);
18void blk_dequeue_request(struct request *rq);
16void __blk_queue_free_tags(struct request_queue *q); 19void __blk_queue_free_tags(struct request_queue *q);
17 20
18void blk_unplug_work(struct work_struct *work); 21void blk_unplug_work(struct work_struct *work);
@@ -43,6 +46,43 @@ static inline void blk_clear_rq_complete(struct request *rq)
43 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); 46 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
44} 47}
45 48
49/*
50 * Internal elevator interface
51 */
52#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
53
54static inline struct request *__elv_next_request(struct request_queue *q)
55{
56 struct request *rq;
57
58 while (1) {
59 while (!list_empty(&q->queue_head)) {
60 rq = list_entry_rq(q->queue_head.next);
61 if (blk_do_ordered(q, &rq))
62 return rq;
63 }
64
65 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
66 return NULL;
67 }
68}
69
70static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
71{
72 struct elevator_queue *e = q->elevator;
73
74 if (e->ops->elevator_activate_req_fn)
75 e->ops->elevator_activate_req_fn(q, rq);
76}
77
78static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
79{
80 struct elevator_queue *e = q->elevator;
81
82 if (e->ops->elevator_deactivate_req_fn)
83 e->ops->elevator_deactivate_req_fn(q, rq);
84}
85
46#ifdef CONFIG_FAIL_IO_TIMEOUT 86#ifdef CONFIG_FAIL_IO_TIMEOUT
47int blk_should_fake_timeout(struct request_queue *); 87int blk_should_fake_timeout(struct request_queue *);
48ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); 88ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
@@ -64,7 +104,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
64int attempt_back_merge(struct request_queue *q, struct request *rq); 104int attempt_back_merge(struct request_queue *q, struct request *rq);
65int attempt_front_merge(struct request_queue *q, struct request *rq); 105int attempt_front_merge(struct request_queue *q, struct request *rq);
66void blk_recalc_rq_segments(struct request *rq); 106void blk_recalc_rq_segments(struct request *rq);
67void blk_recalc_rq_sectors(struct request *rq, int nsect);
68 107
69void blk_queue_congestion_threshold(struct request_queue *q); 108void blk_queue_congestion_threshold(struct request_queue *q);
70 109
@@ -112,9 +151,17 @@ static inline int blk_cpu_to_group(int cpu)
112#endif 151#endif
113} 152}
114 153
154/*
155 * Contribute to IO statistics IFF:
156 *
157 * a) it's attached to a gendisk, and
158 * b) the queue had IO stats enabled when this request was started, and
159 * c) it's a file system request
160 */
115static inline int blk_do_io_stat(struct request *rq) 161static inline int blk_do_io_stat(struct request *rq)
116{ 162{
117 return rq->rq_disk && blk_rq_io_stat(rq); 163 return rq->rq_disk && blk_rq_io_stat(rq) && blk_fs_request(rq) &&
164 blk_discard_rq(rq);
118} 165}
119 166
120#endif 167#endif
diff --git a/block/bsg.c b/block/bsg.c
index 206060e795d..2d746e34f4c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -445,14 +445,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
445 } 445 }
446 446
447 if (rq->next_rq) { 447 if (rq->next_rq) {
448 hdr->dout_resid = rq->data_len; 448 hdr->dout_resid = rq->resid_len;
449 hdr->din_resid = rq->next_rq->data_len; 449 hdr->din_resid = rq->next_rq->resid_len;
450 blk_rq_unmap_user(bidi_bio); 450 blk_rq_unmap_user(bidi_bio);
451 blk_put_request(rq->next_rq); 451 blk_put_request(rq->next_rq);
452 } else if (rq_data_dir(rq) == READ) 452 } else if (rq_data_dir(rq) == READ)
453 hdr->din_resid = rq->data_len; 453 hdr->din_resid = rq->resid_len;
454 else 454 else
455 hdr->dout_resid = rq->data_len; 455 hdr->dout_resid = rq->resid_len;
456 456
457 /* 457 /*
458 * If the request generated a negative error number, return it 458 * If the request generated a negative error number, return it
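bsg (and scsi_ioctl below) now read the residual byte count from the new rq->resid_len field instead of reusing rq->data_len. On the producing side an LLD fills it when the command completes; a hedged sketch (my_lld_done() and resid are illustrative):

/* Hedged sketch of an LLD completion path, not from this patch. */
static void my_lld_done(struct request *rq, unsigned int resid, int error)
{
	if (blk_pc_request(rq))
		rq->resid_len = resid;	/* bytes the device did not transfer */
	blk_end_request_all(rq, error);
}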
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a55a9bd75bd..99ac4304d71 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
349 else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 349 else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
350 return rq2; 350 return rq2;
351 351
352 s1 = rq1->sector; 352 s1 = blk_rq_pos(rq1);
353 s2 = rq2->sector; 353 s2 = blk_rq_pos(rq2);
354 354
355 last = cfqd->last_position; 355 last = cfqd->last_position;
356 356
@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
579 * Sort strictly based on sector. Smallest to the left, 579 * Sort strictly based on sector. Smallest to the left,
580 * largest to the right. 580 * largest to the right.
581 */ 581 */
582 if (sector > cfqq->next_rq->sector) 582 if (sector > blk_rq_pos(cfqq->next_rq))
583 n = &(*p)->rb_right; 583 n = &(*p)->rb_right;
584 else if (sector < cfqq->next_rq->sector) 584 else if (sector < blk_rq_pos(cfqq->next_rq))
585 n = &(*p)->rb_left; 585 n = &(*p)->rb_left;
586 else 586 else
587 break; 587 break;
@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
611 return; 611 return;
612 612
613 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; 613 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
614 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector, 614 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
615 &parent, &p); 615 blk_rq_pos(cfqq->next_rq), &parent, &p);
616 if (!__cfqq) { 616 if (!__cfqq) {
617 rb_link_node(&cfqq->p_node, parent, p); 617 rb_link_node(&cfqq->p_node, parent, p);
618 rb_insert_color(&cfqq->p_node, cfqq->p_root); 618 rb_insert_color(&cfqq->p_node, cfqq->p_root);
@@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
760 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 760 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
761 cfqd->rq_in_driver); 761 cfqd->rq_in_driver);
762 762
763 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; 763 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
764} 764}
765 765
766static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 766static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
@@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
949static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, 949static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
950 struct request *rq) 950 struct request *rq)
951{ 951{
952 if (rq->sector >= cfqd->last_position) 952 if (blk_rq_pos(rq) >= cfqd->last_position)
953 return rq->sector - cfqd->last_position; 953 return blk_rq_pos(rq) - cfqd->last_position;
954 else 954 else
955 return cfqd->last_position - rq->sector; 955 return cfqd->last_position - blk_rq_pos(rq);
956} 956}
957 957
958#define CIC_SEEK_THR 8 * 1024 958#define CIC_SEEK_THR 8 * 1024
@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
996 if (cfq_rq_close(cfqd, __cfqq->next_rq)) 996 if (cfq_rq_close(cfqd, __cfqq->next_rq))
997 return __cfqq; 997 return __cfqq;
998 998
999 if (__cfqq->next_rq->sector < sector) 999 if (blk_rq_pos(__cfqq->next_rq) < sector)
1000 node = rb_next(&__cfqq->p_node); 1000 node = rb_next(&__cfqq->p_node);
1001 else 1001 else
1002 node = rb_prev(&__cfqq->p_node); 1002 node = rb_prev(&__cfqq->p_node);
@@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1918 1918
1919 if (!cic->last_request_pos) 1919 if (!cic->last_request_pos)
1920 sdist = 0; 1920 sdist = 0;
1921 else if (cic->last_request_pos < rq->sector) 1921 else if (cic->last_request_pos < blk_rq_pos(rq))
1922 sdist = rq->sector - cic->last_request_pos; 1922 sdist = blk_rq_pos(rq) - cic->last_request_pos;
1923 else 1923 else
1924 sdist = cic->last_request_pos - rq->sector; 1924 sdist = cic->last_request_pos - blk_rq_pos(rq);
1925 1925
1926 /* 1926 /*
1927 * Don't allow the seek distance to get too large from the 1927 * Don't allow the seek distance to get too large from the
@@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2071 cfq_update_io_seektime(cfqd, cic, rq); 2071 cfq_update_io_seektime(cfqd, cic, rq);
2072 cfq_update_idle_window(cfqd, cfqq, cic); 2072 cfq_update_idle_window(cfqd, cfqq, cic);
2073 2073
2074 cic->last_request_pos = rq->sector + rq->nr_sectors; 2074 cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2075 2075
2076 if (cfqq == cfqd->active_queue) { 2076 if (cfqq == cfqd->active_queue) {
2077 /* 2077 /*
@@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2088 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 2088 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2089 cfqd->busy_queues > 1) { 2089 cfqd->busy_queues > 1) {
2090 del_timer(&cfqd->idle_slice_timer); 2090 del_timer(&cfqd->idle_slice_timer);
2091 blk_start_queueing(cfqd->queue); 2091 __blk_run_queue(cfqd->queue);
2092 } 2092 }
2093 cfq_mark_cfqq_must_dispatch(cfqq); 2093 cfq_mark_cfqq_must_dispatch(cfqq);
2094 } 2094 }
@@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2100 * this new queue is RT and the current one is BE 2100 * this new queue is RT and the current one is BE
2101 */ 2101 */
2102 cfq_preempt_queue(cfqd, cfqq); 2102 cfq_preempt_queue(cfqd, cfqq);
2103 blk_start_queueing(cfqd->queue); 2103 __blk_run_queue(cfqd->queue);
2104 } 2104 }
2105} 2105}
2106 2106
@@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
2345 struct request_queue *q = cfqd->queue; 2345 struct request_queue *q = cfqd->queue;
2346 2346
2347 spin_lock_irq(q->queue_lock); 2347 spin_lock_irq(q->queue_lock);
2348 blk_start_queueing(q); 2348 __blk_run_queue(cfqd->queue);
2349 spin_unlock_irq(q->queue_lock); 2349 spin_unlock_irq(q->queue_lock);
2350} 2350}
2351 2351
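The blk_start_queueing() call sites in cfq above become __blk_run_queue(), which expects the queue lock to be held by the caller; a hedged sketch of the calling convention (my_kick_queue() is illustrative), matching the cfq_kick_queue() hunk:

#include <linux/blkdev.h>

/* Hedged sketch, not from this patch. */
static void my_kick_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);		/* queue_lock must be held */
	spin_unlock_irq(q->queue_lock);
}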
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c4d991d4ade..b547cbca7b2 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
138 138
139 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); 139 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
140 if (__rq) { 140 if (__rq) {
141 BUG_ON(sector != __rq->sector); 141 BUG_ON(sector != blk_rq_pos(__rq));
142 142
143 if (elv_rq_merge_ok(__rq, bio)) { 143 if (elv_rq_merge_ok(__rq, bio)) {
144 ret = ELEVATOR_FRONT_MERGE; 144 ret = ELEVATOR_FRONT_MERGE;
diff --git a/block/elevator.c b/block/elevator.c
index 7073a907257..ebee948293e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -52,8 +52,7 @@ static const int elv_hash_shift = 6;
52#define ELV_HASH_FN(sec) \ 52#define ELV_HASH_FN(sec) \
53 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) 53 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
54#define ELV_HASH_ENTRIES (1 << elv_hash_shift) 54#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
55#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) 55#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
56#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
57 56
58DEFINE_TRACE(block_rq_insert); 57DEFINE_TRACE(block_rq_insert);
59DEFINE_TRACE(block_rq_issue); 58DEFINE_TRACE(block_rq_issue);
@@ -120,9 +119,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
120 * we can merge and sequence is ok, check if it's possible 119 * we can merge and sequence is ok, check if it's possible
121 */ 120 */
122 if (elv_rq_merge_ok(__rq, bio)) { 121 if (elv_rq_merge_ok(__rq, bio)) {
123 if (__rq->sector + __rq->nr_sectors == bio->bi_sector) 122 if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
124 ret = ELEVATOR_BACK_MERGE; 123 ret = ELEVATOR_BACK_MERGE;
125 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector) 124 else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
126 ret = ELEVATOR_FRONT_MERGE; 125 ret = ELEVATOR_FRONT_MERGE;
127 } 126 }
128 127
@@ -310,22 +309,6 @@ void elevator_exit(struct elevator_queue *e)
310} 309}
311EXPORT_SYMBOL(elevator_exit); 310EXPORT_SYMBOL(elevator_exit);
312 311
313static void elv_activate_rq(struct request_queue *q, struct request *rq)
314{
315 struct elevator_queue *e = q->elevator;
316
317 if (e->ops->elevator_activate_req_fn)
318 e->ops->elevator_activate_req_fn(q, rq);
319}
320
321static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
322{
323 struct elevator_queue *e = q->elevator;
324
325 if (e->ops->elevator_deactivate_req_fn)
326 e->ops->elevator_deactivate_req_fn(q, rq);
327}
328
329static inline void __elv_rqhash_del(struct request *rq) 312static inline void __elv_rqhash_del(struct request *rq)
330{ 313{
331 hlist_del_init(&rq->hash); 314 hlist_del_init(&rq->hash);
@@ -387,9 +370,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
387 parent = *p; 370 parent = *p;
388 __rq = rb_entry(parent, struct request, rb_node); 371 __rq = rb_entry(parent, struct request, rb_node);
389 372
390 if (rq->sector < __rq->sector) 373 if (blk_rq_pos(rq) < blk_rq_pos(__rq))
391 p = &(*p)->rb_left; 374 p = &(*p)->rb_left;
392 else if (rq->sector > __rq->sector) 375 else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
393 p = &(*p)->rb_right; 376 p = &(*p)->rb_right;
394 else 377 else
395 return __rq; 378 return __rq;
@@ -417,9 +400,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
417 while (n) { 400 while (n) {
418 rq = rb_entry(n, struct request, rb_node); 401 rq = rb_entry(n, struct request, rb_node);
419 402
420 if (sector < rq->sector) 403 if (sector < blk_rq_pos(rq))
421 n = n->rb_left; 404 n = n->rb_left;
422 else if (sector > rq->sector) 405 else if (sector > blk_rq_pos(rq))
423 n = n->rb_right; 406 n = n->rb_right;
424 else 407 else
425 return rq; 408 return rq;
@@ -458,14 +441,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
458 break; 441 break;
459 if (pos->cmd_flags & stop_flags) 442 if (pos->cmd_flags & stop_flags)
460 break; 443 break;
461 if (rq->sector >= boundary) { 444 if (blk_rq_pos(rq) >= boundary) {
462 if (pos->sector < boundary) 445 if (blk_rq_pos(pos) < boundary)
463 continue; 446 continue;
464 } else { 447 } else {
465 if (pos->sector >= boundary) 448 if (blk_rq_pos(pos) >= boundary)
466 break; 449 break;
467 } 450 }
468 if (rq->sector >= pos->sector) 451 if (blk_rq_pos(rq) >= blk_rq_pos(pos))
469 break; 452 break;
470 } 453 }
471 454
@@ -563,7 +546,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
563 * in_flight count again 546 * in_flight count again
564 */ 547 */
565 if (blk_account_rq(rq)) { 548 if (blk_account_rq(rq)) {
566 q->in_flight--; 549 q->in_flight[rq_is_sync(rq)]--;
567 if (blk_sorted_rq(rq)) 550 if (blk_sorted_rq(rq))
568 elv_deactivate_rq(q, rq); 551 elv_deactivate_rq(q, rq);
569 } 552 }
@@ -599,7 +582,7 @@ void elv_quiesce_start(struct request_queue *q)
599 */ 582 */
600 elv_drain_elevator(q); 583 elv_drain_elevator(q);
601 while (q->rq.elvpriv) { 584 while (q->rq.elvpriv) {
602 blk_start_queueing(q); 585 __blk_run_queue(q);
603 spin_unlock_irq(q->queue_lock); 586 spin_unlock_irq(q->queue_lock);
604 msleep(10); 587 msleep(10);
605 spin_lock_irq(q->queue_lock); 588 spin_lock_irq(q->queue_lock);
@@ -643,8 +626,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
643 * with anything. There's no point in delaying queue 626 * with anything. There's no point in delaying queue
644 * processing. 627 * processing.
645 */ 628 */
646 blk_remove_plug(q); 629 __blk_run_queue(q);
647 blk_start_queueing(q);
648 break; 630 break;
649 631
650 case ELEVATOR_INSERT_SORT: 632 case ELEVATOR_INSERT_SORT:
@@ -703,7 +685,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
703 685
704 if (unplug_it && blk_queue_plugged(q)) { 686 if (unplug_it && blk_queue_plugged(q)) {
705 int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] 687 int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
706 - q->in_flight; 688 - queue_in_flight(q);
707 689
708 if (nrq >= q->unplug_thresh) 690 if (nrq >= q->unplug_thresh)
709 __generic_unplug_device(q); 691 __generic_unplug_device(q);
@@ -759,117 +741,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
759} 741}
760EXPORT_SYMBOL(elv_add_request); 742EXPORT_SYMBOL(elv_add_request);
761 743
762static inline struct request *__elv_next_request(struct request_queue *q)
763{
764 struct request *rq;
765
766 while (1) {
767 while (!list_empty(&q->queue_head)) {
768 rq = list_entry_rq(q->queue_head.next);
769 if (blk_do_ordered(q, &rq))
770 return rq;
771 }
772
773 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
774 return NULL;
775 }
776}
777
778struct request *elv_next_request(struct request_queue *q)
779{
780 struct request *rq;
781 int ret;
782
783 while ((rq = __elv_next_request(q)) != NULL) {
784 if (!(rq->cmd_flags & REQ_STARTED)) {
785 /*
786 * This is the first time the device driver
787 * sees this request (possibly after
788 * requeueing). Notify IO scheduler.
789 */
790 if (blk_sorted_rq(rq))
791 elv_activate_rq(q, rq);
792
793 /*
794 * just mark as started even if we don't start
795 * it, a request that has been delayed should
796 * not be passed by new incoming requests
797 */
798 rq->cmd_flags |= REQ_STARTED;
799 trace_block_rq_issue(q, rq);
800 }
801
802 if (!q->boundary_rq || q->boundary_rq == rq) {
803 q->end_sector = rq_end_sector(rq);
804 q->boundary_rq = NULL;
805 }
806
807 if (rq->cmd_flags & REQ_DONTPREP)
808 break;
809
810 if (q->dma_drain_size && rq->data_len) {
811 /*
812 * make sure space for the drain appears we
813 * know we can do this because max_hw_segments
814 * has been adjusted to be one fewer than the
815 * device can handle
816 */
817 rq->nr_phys_segments++;
818 }
819
820 if (!q->prep_rq_fn)
821 break;
822
823 ret = q->prep_rq_fn(q, rq);
824 if (ret == BLKPREP_OK) {
825 break;
826 } else if (ret == BLKPREP_DEFER) {
827 /*
828 * the request may have been (partially) prepped.
829 * we need to keep this request in the front to
830 * avoid resource deadlock. REQ_STARTED will
831 * prevent other fs requests from passing this one.
832 */
833 if (q->dma_drain_size && rq->data_len &&
834 !(rq->cmd_flags & REQ_DONTPREP)) {
835 /*
836 * remove the space for the drain we added
837 * so that we don't add it again
838 */
839 --rq->nr_phys_segments;
840 }
841
842 rq = NULL;
843 break;
844 } else if (ret == BLKPREP_KILL) {
845 rq->cmd_flags |= REQ_QUIET;
846 __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
847 } else {
848 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
849 break;
850 }
851 }
852
853 return rq;
854}
855EXPORT_SYMBOL(elv_next_request);
856
857void elv_dequeue_request(struct request_queue *q, struct request *rq)
858{
859 BUG_ON(list_empty(&rq->queuelist));
860 BUG_ON(ELV_ON_HASH(rq));
861
862 list_del_init(&rq->queuelist);
863
864 /*
865 * the time frame between a request being removed from the lists
866 * and to it is freed is accounted as io that is in progress at
867 * the driver side.
868 */
869 if (blk_account_rq(rq))
870 q->in_flight++;
871}
872
873int elv_queue_empty(struct request_queue *q) 744int elv_queue_empty(struct request_queue *q)
874{ 745{
875 struct elevator_queue *e = q->elevator; 746 struct elevator_queue *e = q->elevator;
@@ -939,7 +810,7 @@ void elv_abort_queue(struct request_queue *q)
939 rq = list_entry_rq(q->queue_head.next); 810 rq = list_entry_rq(q->queue_head.next);
940 rq->cmd_flags |= REQ_QUIET; 811 rq->cmd_flags |= REQ_QUIET;
941 trace_block_rq_abort(q, rq); 812 trace_block_rq_abort(q, rq);
942 __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); 813 __blk_end_request_all(rq, -EIO);
943 } 814 }
944} 815}
945EXPORT_SYMBOL(elv_abort_queue); 816EXPORT_SYMBOL(elv_abort_queue);
@@ -952,7 +823,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
952 * request is released from the driver, io must be done 823 * request is released from the driver, io must be done
953 */ 824 */
954 if (blk_account_rq(rq)) { 825 if (blk_account_rq(rq)) {
955 q->in_flight--; 826 q->in_flight[rq_is_sync(rq)]--;
956 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) 827 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
957 e->ops->elevator_completed_req_fn(q, rq); 828 e->ops->elevator_completed_req_fn(q, rq);
958 } 829 }
@@ -967,11 +838,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
967 if (!list_empty(&q->queue_head)) 838 if (!list_empty(&q->queue_head))
968 next = list_entry_rq(q->queue_head.next); 839 next = list_entry_rq(q->queue_head.next);
969 840
970 if (!q->in_flight && 841 if (!queue_in_flight(q) &&
971 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && 842 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
972 (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) { 843 (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
973 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); 844 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
974 blk_start_queueing(q); 845 __blk_run_queue(q);
975 } 846 }
976 } 847 }
977} 848}
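With elv_next_request() and elv_dequeue_request() removed from the exported API, driver request_fn loops move to blk_peek_request() plus blk_start_request(), or the combined blk_fetch_request() used in the driver conversions below. A hedged sketch of the new loop shape (my_request_fn, my_hw_busy and my_hw_submit are illustrative):

#include <linux/blkdev.h>

static bool my_hw_busy(void);			/* illustrative */
static void my_hw_submit(struct request *rq);	/* illustrative */

/* Hedged sketch of a request_fn using the new fetch interface. */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (my_hw_busy())
			break;			/* leave rq queued for later */
		blk_start_request(rq);		/* dequeue and start the timeout */
		my_hw_submit(rq);		/* completed later via blk_end_request() */
	}
}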
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 82a0ca2f672..a9670dd4b5d 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -230,7 +230,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
230 hdr->info = 0; 230 hdr->info = 0;
231 if (hdr->masked_status || hdr->host_status || hdr->driver_status) 231 if (hdr->masked_status || hdr->host_status || hdr->driver_status)
232 hdr->info |= SG_INFO_CHECK; 232 hdr->info |= SG_INFO_CHECK;
233 hdr->resid = rq->data_len; 233 hdr->resid = rq->resid_len;
234 hdr->sb_len_wr = 0; 234 hdr->sb_len_wr = 0;
235 235
236 if (rq->sense_len && hdr->sbp) { 236 if (rq->sense_len && hdr->sbp) {
@@ -500,9 +500,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
500 500
501 rq = blk_get_request(q, WRITE, __GFP_WAIT); 501 rq = blk_get_request(q, WRITE, __GFP_WAIT);
502 rq->cmd_type = REQ_TYPE_BLOCK_PC; 502 rq->cmd_type = REQ_TYPE_BLOCK_PC;
503 rq->data = NULL;
504 rq->data_len = 0;
505 rq->extra_len = 0;
506 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 503 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
507 rq->cmd[0] = cmd; 504 rq->cmd[0] = cmd;
508 rq->cmd[4] = data; 505 rq->cmd[4] = data;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 342316064e9..d0dfeef55db 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
1084 if (likely(!blk_pc_request(rq))) 1084 if (likely(!blk_pc_request(rq)))
1085 return 0; 1085 return 0;
1086 1086
1087 if (!rq->data_len || (rq->cmd_flags & REQ_RW)) 1087 if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
1088 return 0; 1088 return 0;
1089 1089
1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; 1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f22ed6cc69f..668dc234b8e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
3321 DAC960_Command_T *Command; 3321 DAC960_Command_T *Command;
3322 3322
3323 while(1) { 3323 while(1) {
3324 Request = elv_next_request(req_q); 3324 Request = blk_peek_request(req_q);
3325 if (!Request) 3325 if (!Request)
3326 return 1; 3326 return 1;
3327 3327
@@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
3338 } 3338 }
3339 Command->Completion = Request->end_io_data; 3339 Command->Completion = Request->end_io_data;
3340 Command->LogicalDriveNumber = (long)Request->rq_disk->private_data; 3340 Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
3341 Command->BlockNumber = Request->sector; 3341 Command->BlockNumber = blk_rq_pos(Request);
3342 Command->BlockCount = Request->nr_sectors; 3342 Command->BlockCount = blk_rq_sectors(Request);
3343 Command->Request = Request; 3343 Command->Request = Request;
3344 blkdev_dequeue_request(Request); 3344 blk_start_request(Request);
3345 Command->SegmentCount = blk_rq_map_sg(req_q, 3345 Command->SegmentCount = blk_rq_map_sg(req_q,
3346 Command->Request, Command->cmd_sglist); 3346 Command->Request, Command->cmd_sglist);
3347 /* pci_map_sg MAY change the value of SegCount */ 3347 /* pci_map_sg MAY change the value of SegCount */
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
3431 * successfully as possible. 3431 * successfully as possible.
3432 */ 3432 */
3433 Command->SegmentCount = 1; 3433 Command->SegmentCount = 1;
3434 Command->BlockNumber = Request->sector; 3434 Command->BlockNumber = blk_rq_pos(Request);
3435 Command->BlockCount = 1; 3435 Command->BlockCount = 1;
3436 DAC960_QueueReadWriteCommand(Command); 3436 DAC960_QueueReadWriteCommand(Command);
3437 return; 3437 return;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ddea8e485cc..f42fa50d355 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -412,7 +412,7 @@ config ATA_OVER_ETH
412 412
413config MG_DISK 413config MG_DISK
414 tristate "mGine mflash, gflash support" 414 tristate "mGine mflash, gflash support"
415 depends on ARM && ATA && GPIOLIB 415 depends on ARM && GPIOLIB
416 help 416 help
417 mGine mFlash(gFlash) block device driver 417 mGine mFlash(gFlash) block device driver
418 418
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8df436ff706..9c6e5b0fe89 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
112MODULE_LICENSE("GPL"); 112MODULE_LICENSE("GPL");
113 113
114static struct request_queue *floppy_queue; 114static struct request_queue *floppy_queue;
115#define QUEUE (floppy_queue)
116#define CURRENT elv_next_request(floppy_queue)
117 115
118/* 116/*
119 * Macros 117 * Macros
@@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
1335 1333
1336static void redo_fd_request(void) 1334static void redo_fd_request(void)
1337{ 1335{
1336 struct request *rq;
1338 unsigned int cnt, block, track, sector; 1337 unsigned int cnt, block, track, sector;
1339 int drive; 1338 int drive;
1340 struct amiga_floppy_struct *floppy; 1339 struct amiga_floppy_struct *floppy;
1341 char *data; 1340 char *data;
1342 unsigned long flags; 1341 unsigned long flags;
1342 int err;
1343 1343
1344 repeat: 1344next_req:
1345 if (!CURRENT) { 1345 rq = blk_fetch_request(floppy_queue);
1346 if (!rq) {
1346 /* Nothing left to do */ 1347 /* Nothing left to do */
1347 return; 1348 return;
1348 } 1349 }
1349 1350
1350 floppy = CURRENT->rq_disk->private_data; 1351 floppy = rq->rq_disk->private_data;
1351 drive = floppy - unit; 1352 drive = floppy - unit;
1352 1353
1354next_segment:
1353 /* Here someone could investigate to be more efficient */ 1355 /* Here someone could investigate to be more efficient */
1354 for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) { 1356 for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
1355#ifdef DEBUG 1357#ifdef DEBUG
1356 printk("fd: sector %ld + %d requested for %s\n", 1358 printk("fd: sector %ld + %d requested for %s\n",
1357 CURRENT->sector,cnt, 1359 blk_rq_pos(rq), cnt,
1358 (rq_data_dir(CURRENT) == READ) ? "read" : "write"); 1360 (rq_data_dir(rq) == READ) ? "read" : "write");
1359#endif 1361#endif
1360 block = CURRENT->sector + cnt; 1362 block = blk_rq_pos(rq) + cnt;
1361 if ((int)block > floppy->blocks) { 1363 if ((int)block > floppy->blocks) {
1362 end_request(CURRENT, 0); 1364 err = -EIO;
1363 goto repeat; 1365 break;
1364 } 1366 }
1365 1367
1366 track = block / (floppy->dtype->sects * floppy->type->sect_mult); 1368 track = block / (floppy->dtype->sects * floppy->type->sect_mult);
1367 sector = block % (floppy->dtype->sects * floppy->type->sect_mult); 1369 sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
1368 data = CURRENT->buffer + 512 * cnt; 1370 data = rq->buffer + 512 * cnt;
1369#ifdef DEBUG 1371#ifdef DEBUG
1370 printk("access to track %d, sector %d, with buffer at " 1372 printk("access to track %d, sector %d, with buffer at "
1371 "0x%08lx\n", track, sector, data); 1373 "0x%08lx\n", track, sector, data);
1372#endif 1374#endif
1373 1375
1374 if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
1375 printk(KERN_WARNING "do_fd_request: unknown command\n");
1376 end_request(CURRENT, 0);
1377 goto repeat;
1378 }
1379 if (get_track(drive, track) == -1) { 1376 if (get_track(drive, track) == -1) {
1380 end_request(CURRENT, 0); 1377 err = -EIO;
1381 goto repeat; 1378 break;
1382 } 1379 }
1383 1380
1384 switch (rq_data_dir(CURRENT)) { 1381 if (rq_data_dir(rq) == READ) {
1385 case READ:
1386 memcpy(data, floppy->trackbuf + sector * 512, 512); 1382 memcpy(data, floppy->trackbuf + sector * 512, 512);
1387 break; 1383 } else {
1388
1389 case WRITE:
1390 memcpy(floppy->trackbuf + sector * 512, data, 512); 1384 memcpy(floppy->trackbuf + sector * 512, data, 512);
1391 1385
1392 /* keep the drive spinning while writes are scheduled */ 1386 /* keep the drive spinning while writes are scheduled */
1393 if (!fd_motor_on(drive)) { 1387 if (!fd_motor_on(drive)) {
1394 end_request(CURRENT, 0); 1388 err = -EIO;
1395 goto repeat; 1389 break;
1396 } 1390 }
1397 /* 1391 /*
1398 * setup a callback to write the track buffer 1392 * setup a callback to write the track buffer
@@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
1404 /* reset the timer */ 1398 /* reset the timer */
1405 mod_timer (flush_track_timer + drive, jiffies + 1); 1399 mod_timer (flush_track_timer + drive, jiffies + 1);
1406 local_irq_restore(flags); 1400 local_irq_restore(flags);
1407 break;
1408 } 1401 }
1409 } 1402 }
1410 CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
1411 CURRENT->sector += CURRENT->current_nr_sectors;
1412 1403
1413 end_request(CURRENT, 1); 1404 if (__blk_end_request_cur(rq, err))
1414 goto repeat; 1405 goto next_segment;
1406 goto next_req;
1415} 1407}
1416 1408
1417static void do_fd_request(struct request_queue * q) 1409static void do_fd_request(struct request_queue * q)
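The amiflop conversion above shows the common pattern for these old-style drivers: handle blk_rq_cur_sectors() worth of data, then let __blk_end_request_cur() decide whether the same request continues with its next segment. A hedged distillation of that loop (my_do_segment() is illustrative; the queue lock is already held in request_fn context):

static int my_do_segment(struct request *rq);	/* illustrative */

/* Hedged sketch distilled from the conversion pattern, not from this patch. */
static void my_redo_request(struct request_queue *q)
{
	struct request *rq;
	int err;

	while ((rq = blk_fetch_request(q)) != NULL) {
		do {
			err = my_do_segment(rq);  /* handles blk_rq_cur_sectors(rq) */
		} while (__blk_end_request_cur(rq, err));	/* true: more segments */
	}
}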
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4234c11c1e4..f5e7180d7f4 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -79,9 +79,7 @@
79#undef DEBUG 79#undef DEBUG
80 80
81static struct request_queue *floppy_queue; 81static struct request_queue *floppy_queue;
82 82static struct request *fd_request;
83#define QUEUE (floppy_queue)
84#define CURRENT elv_next_request(floppy_queue)
85 83
86/* Disk types: DD, HD, ED */ 84/* Disk types: DD, HD, ED */
87static struct atari_disk_type { 85static struct atari_disk_type {
@@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
376static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); 374static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
377static DEFINE_TIMER(fd_timer, check_change, 0, 0); 375static DEFINE_TIMER(fd_timer, check_change, 0, 0);
378 376
377static void fd_end_request_cur(int err)
378{
379 if (!__blk_end_request_cur(fd_request, err))
380 fd_request = NULL;
381}
382
379static inline void start_motor_off_timer(void) 383static inline void start_motor_off_timer(void)
380{ 384{
381 mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY); 385 mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@@ -606,15 +610,15 @@ static void fd_error( void )
606 return; 610 return;
607 } 611 }
608 612
609 if (!CURRENT) 613 if (!fd_request)
610 return; 614 return;
611 615
612 CURRENT->errors++; 616 fd_request->errors++;
613 if (CURRENT->errors >= MAX_ERRORS) { 617 if (fd_request->errors >= MAX_ERRORS) {
614 printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); 618 printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
615 end_request(CURRENT, 0); 619 fd_end_request_cur(-EIO);
616 } 620 }
617 else if (CURRENT->errors == RECALIBRATE_ERRORS) { 621 else if (fd_request->errors == RECALIBRATE_ERRORS) {
618 printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); 622 printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
619 if (SelectedDrive != -1) 623 if (SelectedDrive != -1)
620 SUD.track = -1; 624 SUD.track = -1;
@@ -725,16 +729,14 @@ static void do_fd_action( int drive )
725 if (IS_BUFFERED( drive, ReqSide, ReqTrack )) { 729 if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
726 if (ReqCmd == READ) { 730 if (ReqCmd == READ) {
727 copy_buffer( SECTOR_BUFFER(ReqSector), ReqData ); 731 copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
728 if (++ReqCnt < CURRENT->current_nr_sectors) { 732 if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
729 /* read next sector */ 733 /* read next sector */
730 setup_req_params( drive ); 734 setup_req_params( drive );
731 goto repeat; 735 goto repeat;
732 } 736 }
733 else { 737 else {
734 /* all sectors finished */ 738 /* all sectors finished */
735 CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 739 fd_end_request_cur(0);
736 CURRENT->sector += CURRENT->current_nr_sectors;
737 end_request(CURRENT, 1);
738 redo_fd_request(); 740 redo_fd_request();
739 return; 741 return;
740 } 742 }
@@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
1132 } 1134 }
1133 } 1135 }
1134 1136
1135 if (++ReqCnt < CURRENT->current_nr_sectors) { 1137 if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
1136 /* read next sector */ 1138 /* read next sector */
1137 setup_req_params( SelectedDrive ); 1139 setup_req_params( SelectedDrive );
1138 do_fd_action( SelectedDrive ); 1140 do_fd_action( SelectedDrive );
1139 } 1141 }
1140 else { 1142 else {
1141 /* all sectors finished */ 1143 /* all sectors finished */
1142 CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 1144 fd_end_request_cur(0);
1143 CURRENT->sector += CURRENT->current_nr_sectors;
1144 end_request(CURRENT, 1);
1145 redo_fd_request(); 1145 redo_fd_request();
1146 } 1146 }
1147 return; 1147 return;
@@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
1382 ReqData = ReqBuffer + 512 * ReqCnt; 1382 ReqData = ReqBuffer + 512 * ReqCnt;
1383 1383
1384 if (UseTrackbuffer) 1384 if (UseTrackbuffer)
1385 read_track = (ReqCmd == READ && CURRENT->errors == 0); 1385 read_track = (ReqCmd == READ && fd_request->errors == 0);
1386 else 1386 else
1387 read_track = 0; 1387 read_track = 0;
1388 1388
@@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
1396 int drive, type; 1396 int drive, type;
1397 struct atari_floppy_struct *floppy; 1397 struct atari_floppy_struct *floppy;
1398 1398
1399 DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n", 1399 DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
1400 CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "", 1400 fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
1401 CURRENT ? CURRENT->sector : 0 )); 1401 fd_request ? blk_rq_pos(fd_request) : 0 ));
1402 1402
1403 IsFormatting = 0; 1403 IsFormatting = 0;
1404 1404
1405repeat: 1405repeat:
1406 if (!fd_request) {
1407 fd_request = blk_fetch_request(floppy_queue);
1408 if (!fd_request)
1409 goto the_end;
1410 }
1406 1411
1407 if (!CURRENT) 1412 floppy = fd_request->rq_disk->private_data;
1408 goto the_end;
1409
1410 floppy = CURRENT->rq_disk->private_data;
1411 drive = floppy - unit; 1413 drive = floppy - unit;
1412 type = floppy->type; 1414 type = floppy->type;
1413 1415
1414 if (!UD.connected) { 1416 if (!UD.connected) {
1415 /* drive not connected */ 1417 /* drive not connected */
1416 printk(KERN_ERR "Unknown Device: fd%d\n", drive ); 1418 printk(KERN_ERR "Unknown Device: fd%d\n", drive );
1417 end_request(CURRENT, 0); 1419 fd_end_request_cur(-EIO);
1418 goto repeat; 1420 goto repeat;
1419 } 1421 }
1420 1422
@@ -1430,12 +1432,12 @@ repeat:
1430 /* user supplied disk type */ 1432 /* user supplied disk type */
1431 if (--type >= NUM_DISK_MINORS) { 1433 if (--type >= NUM_DISK_MINORS) {
1432 printk(KERN_WARNING "fd%d: invalid disk format", drive ); 1434 printk(KERN_WARNING "fd%d: invalid disk format", drive );
1433 end_request(CURRENT, 0); 1435 fd_end_request_cur(-EIO);
1434 goto repeat; 1436 goto repeat;
1435 } 1437 }
1436 if (minor2disktype[type].drive_types > DriveType) { 1438 if (minor2disktype[type].drive_types > DriveType) {
1437 printk(KERN_WARNING "fd%d: unsupported disk format", drive ); 1439 printk(KERN_WARNING "fd%d: unsupported disk format", drive );
1438 end_request(CURRENT, 0); 1440 fd_end_request_cur(-EIO);
1439 goto repeat; 1441 goto repeat;
1440 } 1442 }
1441 type = minor2disktype[type].index; 1443 type = minor2disktype[type].index;
@@ -1444,8 +1446,8 @@ repeat:
1444 UD.autoprobe = 0; 1446 UD.autoprobe = 0;
1445 } 1447 }
1446 1448
1447 if (CURRENT->sector + 1 > UDT->blocks) { 1449 if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
1448 end_request(CURRENT, 0); 1450 fd_end_request_cur(-EIO);
1449 goto repeat; 1451 goto repeat;
1450 } 1452 }
1451 1453
@@ -1453,9 +1455,9 @@ repeat:
1453 del_timer( &motor_off_timer ); 1455 del_timer( &motor_off_timer );
1454 1456
1455 ReqCnt = 0; 1457 ReqCnt = 0;
1456 ReqCmd = rq_data_dir(CURRENT); 1458 ReqCmd = rq_data_dir(fd_request);
1457 ReqBlock = CURRENT->sector; 1459 ReqBlock = blk_rq_pos(fd_request);
1458 ReqBuffer = CURRENT->buffer; 1460 ReqBuffer = fd_request->buffer;
1459 setup_req_params( drive ); 1461 setup_req_params( drive );
1460 do_fd_action( drive ); 1462 do_fd_action( drive );
1461 1463
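ataflop keeps its in-flight request in a static fd_request pointer because completion happens from interrupt and timer context; the fd_end_request_cur() wrapper added above clears that pointer once __blk_end_request_cur() reports the request fully done. A hedged distillation (cur_rq and my_end_request_cur() are illustrative):

static struct request *cur_rq;	/* illustrative stand-in for fd_request */

/* Hedged sketch mirroring the wrapper added above, not from this patch. */
static void my_end_request_cur(int err)
{
	if (!__blk_end_request_cur(cur_rq, err))
		cur_rq = NULL;	/* fully finished, fetch a new request next time */
}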
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4d4d5e0d3fa..e714e7cce6f 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1299,7 +1299,6 @@ static void cciss_softirq_done(struct request *rq)
1299{ 1299{
1300 CommandList_struct *cmd = rq->completion_data; 1300 CommandList_struct *cmd = rq->completion_data;
1301 ctlr_info_t *h = hba[cmd->ctlr]; 1301 ctlr_info_t *h = hba[cmd->ctlr];
1302 unsigned int nr_bytes;
1303 unsigned long flags; 1302 unsigned long flags;
1304 u64bit temp64; 1303 u64bit temp64;
1305 int i, ddir; 1304 int i, ddir;
@@ -1321,15 +1320,11 @@ static void cciss_softirq_done(struct request *rq)
1321 printk("Done with %p\n", rq); 1320 printk("Done with %p\n", rq);
1322#endif /* CCISS_DEBUG */ 1321#endif /* CCISS_DEBUG */
1323 1322
1324 /* 1323 /* set the residual count for pc requests */
1325 * Store the full size and set the residual count for pc requests
1326 */
1327 nr_bytes = blk_rq_bytes(rq);
1328 if (blk_pc_request(rq)) 1324 if (blk_pc_request(rq))
1329 rq->data_len = cmd->err_info->ResidualCnt; 1325 rq->resid_len = cmd->err_info->ResidualCnt;
1330 1326
1331 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes)) 1327 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
1332 BUG();
1333 1328
1334 spin_lock_irqsave(&h->lock, flags); 1329 spin_lock_irqsave(&h->lock, flags);
1335 cmd_free(h, cmd, 1); 1330 cmd_free(h, cmd, 1);
@@ -2691,7 +2686,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2691 printk(KERN_WARNING "cciss: cmd %p has" 2686 printk(KERN_WARNING "cciss: cmd %p has"
2692 " completed with data underrun " 2687 " completed with data underrun "
2693 "reported\n", cmd); 2688 "reported\n", cmd);
2694 cmd->rq->data_len = cmd->err_info->ResidualCnt; 2689 cmd->rq->resid_len = cmd->err_info->ResidualCnt;
2695 } 2690 }
2696 break; 2691 break;
2697 case CMD_DATA_OVERRUN: 2692 case CMD_DATA_OVERRUN:
@@ -2806,7 +2801,7 @@ static void do_cciss_request(struct request_queue *q)
2806 goto startio; 2801 goto startio;
2807 2802
2808 queue: 2803 queue:
2809 creq = elv_next_request(q); 2804 creq = blk_peek_request(q);
2810 if (!creq) 2805 if (!creq)
2811 goto startio; 2806 goto startio;
2812 2807
@@ -2815,7 +2810,7 @@ static void do_cciss_request(struct request_queue *q)
2815 if ((c = cmd_alloc(h, 1)) == NULL) 2810 if ((c = cmd_alloc(h, 1)) == NULL)
2816 goto full; 2811 goto full;
2817 2812
2818 blkdev_dequeue_request(creq); 2813 blk_start_request(creq);
2819 2814
2820 spin_unlock_irq(q->queue_lock); 2815 spin_unlock_irq(q->queue_lock);
2821 2816
@@ -2840,10 +2835,10 @@ static void do_cciss_request(struct request_queue *q)
2840 c->Request.Timeout = 0; // Don't time out 2835 c->Request.Timeout = 0; // Don't time out
2841 c->Request.CDB[0] = 2836 c->Request.CDB[0] =
2842 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; 2837 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2843 start_blk = creq->sector; 2838 start_blk = blk_rq_pos(creq);
2844#ifdef CCISS_DEBUG 2839#ifdef CCISS_DEBUG
2845 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector, 2840 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
2846 (int)creq->nr_sectors); 2841 (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
2847#endif /* CCISS_DEBUG */ 2842#endif /* CCISS_DEBUG */
2848 2843
2849 sg_init_table(tmp_sg, MAXSGENTRIES); 2844 sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2869,8 +2864,8 @@ static void do_cciss_request(struct request_queue *q)
2869 h->maxSG = seg; 2864 h->maxSG = seg;
2870 2865
2871#ifdef CCISS_DEBUG 2866#ifdef CCISS_DEBUG
2872 printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n", 2867 printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
2873 creq->nr_sectors, seg); 2868 blk_rq_sectors(creq), seg);
2874#endif /* CCISS_DEBUG */ 2869#endif /* CCISS_DEBUG */
2875 2870
2876 c->Header.SGList = c->Header.SGTotal = seg; 2871 c->Header.SGList = c->Header.SGTotal = seg;
@@ -2882,8 +2877,8 @@ static void do_cciss_request(struct request_queue *q)
2882 c->Request.CDB[4] = (start_blk >> 8) & 0xff; 2877 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2883 c->Request.CDB[5] = start_blk & 0xff; 2878 c->Request.CDB[5] = start_blk & 0xff;
2884 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB 2879 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2885 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff; 2880 c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
2886 c->Request.CDB[8] = creq->nr_sectors & 0xff; 2881 c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
2887 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; 2882 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2888 } else { 2883 } else {
2889 u32 upper32 = upper_32_bits(start_blk); 2884 u32 upper32 = upper_32_bits(start_blk);
@@ -2898,10 +2893,10 @@ static void do_cciss_request(struct request_queue *q)
2898 c->Request.CDB[7]= (start_blk >> 16) & 0xff; 2893 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2899 c->Request.CDB[8]= (start_blk >> 8) & 0xff; 2894 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2900 c->Request.CDB[9]= start_blk & 0xff; 2895 c->Request.CDB[9]= start_blk & 0xff;
2901 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff; 2896 c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
2902 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff; 2897 c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
2903 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff; 2898 c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
2904 c->Request.CDB[13]= creq->nr_sectors & 0xff; 2899 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
2905 c->Request.CDB[14] = c->Request.CDB[15] = 0; 2900 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2906 } 2901 }
2907 } else if (blk_pc_request(creq)) { 2902 } else if (blk_pc_request(creq)) {
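cciss keeps the peek-then-dequeue split because it has to give up when no command slot is free: blk_peek_request() looks at the head of the queue without dequeueing, blk_start_request() dequeues the request and starts timeout accounting, the residual byte count for packet commands now lives in rq->resid_len, and full completion collapses to blk_end_request_all(). Roughly the same shape, sketched with invented foo_* placeholders:

/* Illustrative sketch only, not part of the patch. */
static bool foo_cmd_alloc(void)
{
	return true;				/* placeholder for "is a command slot free?" */
}

static void foo_do_request(struct request_queue *q)
{
	struct request *creq;

	while ((creq = blk_peek_request(q)) != NULL) {	/* look, do not dequeue yet */
		if (!foo_cmd_alloc())
			break;				/* leave it on the queue for later */

		blk_start_request(creq);		/* dequeue + start the request */
		/* ... build the command from blk_rq_pos()/blk_rq_sectors() ... */
	}
}

static void foo_complete(struct request *rq, unsigned int residual, int error)
{
	if (blk_pc_request(rq))
		rq->resid_len = residual;	/* untransferred bytes, replaces data_len */

	blk_end_request_all(rq, error);		/* complete whatever is left of rq */
}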
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index ca268ca1115..a02dcfc00f1 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
903 goto startio; 903 goto startio;
904 904
905queue_next: 905queue_next:
906 creq = elv_next_request(q); 906 creq = blk_peek_request(q);
907 if (!creq) 907 if (!creq)
908 goto startio; 908 goto startio;
909 909
@@ -912,17 +912,18 @@ queue_next:
912 if ((c = cmd_alloc(h,1)) == NULL) 912 if ((c = cmd_alloc(h,1)) == NULL)
913 goto startio; 913 goto startio;
914 914
915 blkdev_dequeue_request(creq); 915 blk_start_request(creq);
916 916
917 c->ctlr = h->ctlr; 917 c->ctlr = h->ctlr;
918 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv; 918 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
919 c->hdr.size = sizeof(rblk_t) >> 2; 919 c->hdr.size = sizeof(rblk_t) >> 2;
920 c->size += sizeof(rblk_t); 920 c->size += sizeof(rblk_t);
921 921
922 c->req.hdr.blk = creq->sector; 922 c->req.hdr.blk = blk_rq_pos(creq);
923 c->rq = creq; 923 c->rq = creq;
924DBGPX( 924DBGPX(
925 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors); 925 printk("sector=%d, nr_sectors=%u\n",
926 blk_rq_pos(creq), blk_rq_sectors(creq));
926); 927);
927 sg_init_table(tmp_sg, SG_MAX); 928 sg_init_table(tmp_sg, SG_MAX);
928 seg = blk_rq_map_sg(q, creq, tmp_sg); 929 seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
940 tmp_sg[i].offset, 941 tmp_sg[i].offset,
941 tmp_sg[i].length, dir); 942 tmp_sg[i].length, dir);
942 } 943 }
943DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); ); 944DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
944 c->req.hdr.sg_cnt = seg; 945 c->req.hdr.sg_cnt = seg;
945 c->req.hdr.blk_cnt = creq->nr_sectors; 946 c->req.hdr.blk_cnt = blk_rq_sectors(creq);
946 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE; 947 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
947 c->type = CMD_RWREQ; 948 c->type = CMD_RWREQ;
948 949
@@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
1024 cmd->req.sg[i].size, ddir); 1025 cmd->req.sg[i].size, ddir);
1025 1026
1026 DBGPX(printk("Done with %p\n", rq);); 1027 DBGPX(printk("Done with %p\n", rq););
1027 if (__blk_end_request(rq, error, blk_rq_bytes(rq))) 1028 __blk_end_request_all(rq, error);
1028 BUG();
1029} 1029}
1030 1030
1031/* 1031/*
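The cpqarray completion change is the same simplification applied throughout: finishing a request in full used to be spelled __blk_end_request(rq, error, blk_rq_bytes(rq)) followed by a BUG() if anything remained, and the *_all() helpers now say it directly. A two-line sketch, with the locking assumption spelled out (the __-prefixed variants expect the queue lock to be held; the plain ones take it themselves):

/* Illustrative sketch only, not part of the patch. */
static void foo_complete_locked(struct request *rq, int error)
{
	__blk_end_request_all(rq, error);	/* queue lock already held by the caller */
}

static void foo_complete(struct request *rq, int error)
{
	blk_end_request_all(rq, error);		/* takes and releases the queue lock itself */
}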
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1300df6f164..90877fee0ee 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
931 del_timer(&fd_timeout); 931 del_timer(&fd_timeout);
932 cont = NULL; 932 cont = NULL;
933 clear_bit(0, &fdc_busy); 933 clear_bit(0, &fdc_busy);
934 if (elv_next_request(floppy_queue)) 934 if (current_req || blk_peek_request(floppy_queue))
935 do_fd_request(floppy_queue); 935 do_fd_request(floppy_queue);
936 spin_unlock_irqrestore(&floppy_lock, flags); 936 spin_unlock_irqrestore(&floppy_lock, flags);
937 wake_up(&fdc_wait); 937 wake_up(&fdc_wait);
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
2303 2303
2304 /* current_count_sectors can be zero if transfer failed */ 2304 /* current_count_sectors can be zero if transfer failed */
2305 if (error) 2305 if (error)
2306 nr_sectors = req->current_nr_sectors; 2306 nr_sectors = blk_rq_cur_sectors(req);
2307 if (__blk_end_request(req, error, nr_sectors << 9)) 2307 if (__blk_end_request(req, error, nr_sectors << 9))
2308 return; 2308 return;
2309 2309
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
2332 if (uptodate) { 2332 if (uptodate) {
2333 /* maintain values for invalidation on geometry 2333 /* maintain values for invalidation on geometry
2334 * change */ 2334 * change */
2335 block = current_count_sectors + req->sector; 2335 block = current_count_sectors + blk_rq_pos(req);
2336 INFBOUND(DRS->maxblock, block); 2336 INFBOUND(DRS->maxblock, block);
2337 if (block > _floppy->sect) 2337 if (block > _floppy->sect)
2338 DRS->maxtrack = 1; 2338 DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
2346 /* record write error information */ 2346 /* record write error information */
2347 DRWE->write_errors++; 2347 DRWE->write_errors++;
2348 if (DRWE->write_errors == 1) { 2348 if (DRWE->write_errors == 1) {
2349 DRWE->first_error_sector = req->sector; 2349 DRWE->first_error_sector = blk_rq_pos(req);
2350 DRWE->first_error_generation = DRS->generation; 2350 DRWE->first_error_generation = DRS->generation;
2351 } 2351 }
2352 DRWE->last_error_sector = req->sector; 2352 DRWE->last_error_sector = blk_rq_pos(req);
2353 DRWE->last_error_generation = DRS->generation; 2353 DRWE->last_error_generation = DRS->generation;
2354 } 2354 }
2355 spin_lock_irqsave(q->queue_lock, flags); 2355 spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2503 2503
2504 max_sector = transfer_size(ssize, 2504 max_sector = transfer_size(ssize,
2505 min(max_sector, max_sector_2), 2505 min(max_sector, max_sector_2),
2506 current_req->nr_sectors); 2506 blk_rq_sectors(current_req));
2507 2507
2508 if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE && 2508 if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
2509 buffer_max > fsector_t + current_req->nr_sectors) 2509 buffer_max > fsector_t + blk_rq_sectors(current_req))
2510 current_count_sectors = min_t(int, buffer_max - fsector_t, 2510 current_count_sectors = min_t(int, buffer_max - fsector_t,
2511 current_req->nr_sectors); 2511 blk_rq_sectors(current_req));
2512 2512
2513 remaining = current_count_sectors << 9; 2513 remaining = current_count_sectors << 9;
2514#ifdef FLOPPY_SANITY_CHECK 2514#ifdef FLOPPY_SANITY_CHECK
2515 if ((remaining >> 9) > current_req->nr_sectors && 2515 if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
2516 CT(COMMAND) == FD_WRITE) {
2517 DPRINT("in copy buffer\n"); 2516 DPRINT("in copy buffer\n");
2518 printk("current_count_sectors=%ld\n", current_count_sectors); 2517 printk("current_count_sectors=%ld\n", current_count_sectors);
2519 printk("remaining=%d\n", remaining >> 9); 2518 printk("remaining=%d\n", remaining >> 9);
2520 printk("current_req->nr_sectors=%ld\n", 2519 printk("current_req->nr_sectors=%u\n",
2521 current_req->nr_sectors); 2520 blk_rq_sectors(current_req));
2522 printk("current_req->current_nr_sectors=%u\n", 2521 printk("current_req->current_nr_sectors=%u\n",
2523 current_req->current_nr_sectors); 2522 blk_rq_cur_sectors(current_req));
2524 printk("max_sector=%d\n", max_sector); 2523 printk("max_sector=%d\n", max_sector);
2525 printk("ssize=%d\n", ssize); 2524 printk("ssize=%d\n", ssize);
2526 } 2525 }
@@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2530 2529
2531 dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9); 2530 dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
2532 2531
2533 size = current_req->current_nr_sectors << 9; 2532 size = blk_rq_cur_bytes(current_req);
2534 2533
2535 rq_for_each_segment(bv, current_req, iter) { 2534 rq_for_each_segment(bv, current_req, iter) {
2536 if (!remaining) 2535 if (!remaining)
@@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
2648 2647
2649 max_sector = _floppy->sect * _floppy->head; 2648 max_sector = _floppy->sect * _floppy->head;
2650 2649
2651 TRACK = (int)current_req->sector / max_sector; 2650 TRACK = (int)blk_rq_pos(current_req) / max_sector;
2652 fsector_t = (int)current_req->sector % max_sector; 2651 fsector_t = (int)blk_rq_pos(current_req) % max_sector;
2653 if (_floppy->track && TRACK >= _floppy->track) { 2652 if (_floppy->track && TRACK >= _floppy->track) {
2654 if (current_req->current_nr_sectors & 1) { 2653 if (blk_rq_cur_sectors(current_req) & 1) {
2655 current_count_sectors = 1; 2654 current_count_sectors = 1;
2656 return 1; 2655 return 1;
2657 } else 2656 } else
@@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
2669 if (fsector_t >= max_sector) { 2668 if (fsector_t >= max_sector) {
2670 current_count_sectors = 2669 current_count_sectors =
2671 min_t(int, _floppy->sect - fsector_t, 2670 min_t(int, _floppy->sect - fsector_t,
2672 current_req->nr_sectors); 2671 blk_rq_sectors(current_req));
2673 return 1; 2672 return 1;
2674 } 2673 }
2675 SIZECODE = 2; 2674 SIZECODE = 2;
@@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
2720 2719
2721 in_sector_offset = (fsector_t % _floppy->sect) % ssize; 2720 in_sector_offset = (fsector_t % _floppy->sect) % ssize;
2722 aligned_sector_t = fsector_t - in_sector_offset; 2721 aligned_sector_t = fsector_t - in_sector_offset;
2723 max_size = current_req->nr_sectors; 2722 max_size = blk_rq_sectors(current_req);
2724 if ((raw_cmd->track == buffer_track) && 2723 if ((raw_cmd->track == buffer_track) &&
2725 (current_drive == buffer_drive) && 2724 (current_drive == buffer_drive) &&
2726 (fsector_t >= buffer_min) && (fsector_t < buffer_max)) { 2725 (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
2729 copy_buffer(1, max_sector, buffer_max); 2728 copy_buffer(1, max_sector, buffer_max);
2730 return 1; 2729 return 1;
2731 } 2730 }
2732 } else if (in_sector_offset || current_req->nr_sectors < ssize) { 2731 } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
2733 if (CT(COMMAND) == FD_WRITE) { 2732 if (CT(COMMAND) == FD_WRITE) {
2734 if (fsector_t + current_req->nr_sectors > ssize && 2733 if (fsector_t + blk_rq_sectors(current_req) > ssize &&
2735 fsector_t + current_req->nr_sectors < ssize + ssize) 2734 fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
2736 max_size = ssize + ssize; 2735 max_size = ssize + ssize;
2737 else 2736 else
2738 max_size = ssize; 2737 max_size = ssize;
@@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
2776 (indirect * 2 > direct * 3 && 2775 (indirect * 2 > direct * 3 &&
2777 *errors < DP->max_errors.read_track && ((!probing 2776 *errors < DP->max_errors.read_track && ((!probing
2778 || (DP->read_track & (1 << DRS->probed_format)))))) { 2777 || (DP->read_track & (1 << DRS->probed_format)))))) {
2779 max_size = current_req->nr_sectors; 2778 max_size = blk_rq_sectors(current_req);
2780 } else { 2779 } else {
2781 raw_cmd->kernel_data = current_req->buffer; 2780 raw_cmd->kernel_data = current_req->buffer;
2782 raw_cmd->length = current_count_sectors << 9; 2781 raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
2801 fsector_t > buffer_max || 2800 fsector_t > buffer_max ||
2802 fsector_t < buffer_min || 2801 fsector_t < buffer_min ||
2803 ((CT(COMMAND) == FD_READ || 2802 ((CT(COMMAND) == FD_READ ||
2804 (!in_sector_offset && current_req->nr_sectors >= ssize)) && 2803 (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
2805 max_sector > 2 * max_buffer_sectors + buffer_min && 2804 max_sector > 2 * max_buffer_sectors + buffer_min &&
2806 max_size + fsector_t > 2 * max_buffer_sectors + buffer_min) 2805 max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
2807 /* not enough space */ 2806 /* not enough space */
@@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
2879 printk("write\n"); 2878 printk("write\n");
2880 return 0; 2879 return 0;
2881 } 2880 }
2882 } else if (raw_cmd->length > current_req->nr_sectors << 9 || 2881 } else if (raw_cmd->length > blk_rq_bytes(current_req) ||
2883 current_count_sectors > current_req->nr_sectors) { 2882 current_count_sectors > blk_rq_sectors(current_req)) {
2884 DPRINT("buffer overrun in direct transfer\n"); 2883 DPRINT("buffer overrun in direct transfer\n");
2885 return 0; 2884 return 0;
2886 } else if (raw_cmd->length < current_count_sectors << 9) { 2885 } else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
2913 struct request *req; 2912 struct request *req;
2914 2913
2915 spin_lock_irq(floppy_queue->queue_lock); 2914 spin_lock_irq(floppy_queue->queue_lock);
2916 req = elv_next_request(floppy_queue); 2915 req = blk_fetch_request(floppy_queue);
2917 spin_unlock_irq(floppy_queue->queue_lock); 2916 spin_unlock_irq(floppy_queue->queue_lock);
2918 if (!req) { 2917 if (!req) {
2919 do_floppy = NULL; 2918 do_floppy = NULL;
@@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
2990 if (usage_count == 0) { 2989 if (usage_count == 0) {
2991 printk("warning: usage count=0, current_req=%p exiting\n", 2990 printk("warning: usage count=0, current_req=%p exiting\n",
2992 current_req); 2991 current_req);
2993 printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector, 2992 printk("sect=%ld type=%x flags=%x\n",
2994 current_req->cmd_type, current_req->cmd_flags); 2993 (long)blk_rq_pos(current_req), current_req->cmd_type,
2994 current_req->cmd_flags);
2995 return; 2995 return;
2996 } 2996 }
2997 if (test_bit(0, &fdc_busy)) { 2997 if (test_bit(0, &fdc_busy)) {
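The floppy hunks are almost entirely mechanical accessor conversions: request fields are no longer read directly, and both the sector and byte views come from helpers. A quick reference sketch; the comments describe the helper semantics under this API, and the foo_* wrapper is illustrative only.

/* Illustrative sketch only, not part of the patch. */
#include <linux/kernel.h>
#include <linux/blkdev.h>

static void foo_show_request(struct request *rq)
{
	sector_t pos	  = blk_rq_pos(rq);	    /* was rq->sector: next sector to submit */
	unsigned int left = blk_rq_sectors(rq);	    /* was rq->nr_sectors: sectors remaining */
	unsigned int cur  = blk_rq_cur_sectors(rq); /* was rq->current_nr_sectors:
						       sectors left in the current segment */
	unsigned int bytes     = blk_rq_bytes(rq);	/* byte view of blk_rq_sectors() */
	unsigned int cur_bytes = blk_rq_cur_bytes(rq);	/* byte view of blk_rq_cur_sectors() */

	printk(KERN_DEBUG "pos=%llu left=%u cur=%u bytes=%u cur_bytes=%u\n",
	       (unsigned long long)pos, left, cur, bytes, cur_bytes);
}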
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index baaa9e486e5..961de56d00a 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -98,10 +98,9 @@
98 98
99static DEFINE_SPINLOCK(hd_lock); 99static DEFINE_SPINLOCK(hd_lock);
100static struct request_queue *hd_queue; 100static struct request_queue *hd_queue;
101static struct request *hd_req;
101 102
102#define MAJOR_NR HD_MAJOR 103#define MAJOR_NR HD_MAJOR
103#define QUEUE (hd_queue)
104#define CURRENT elv_next_request(hd_queue)
105 104
106#define TIMEOUT_VALUE (6*HZ) 105#define TIMEOUT_VALUE (6*HZ)
107#define HD_DELAY 0 106#define HD_DELAY 0
@@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
195 NR_HD = hdind+1; 194 NR_HD = hdind+1;
196} 195}
197 196
197static bool hd_end_request(int err, unsigned int bytes)
198{
199 if (__blk_end_request(hd_req, err, bytes))
200 return true;
201 hd_req = NULL;
202 return false;
203}
204
205static bool hd_end_request_cur(int err)
206{
207 return hd_end_request(err, blk_rq_cur_bytes(hd_req));
208}
209
198static void dump_status(const char *msg, unsigned int stat) 210static void dump_status(const char *msg, unsigned int stat)
199{ 211{
200 char *name = "hd?"; 212 char *name = "hd?";
201 if (CURRENT) 213 if (hd_req)
202 name = CURRENT->rq_disk->disk_name; 214 name = hd_req->rq_disk->disk_name;
203 215
204#ifdef VERBOSE_ERRORS 216#ifdef VERBOSE_ERRORS
205 printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff); 217 printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
227 if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) { 239 if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
228 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL), 240 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
229 inb(HD_CURRENT) & 0xf, inb(HD_SECTOR)); 241 inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
230 if (CURRENT) 242 if (hd_req)
231 printk(", sector=%ld", CURRENT->sector); 243 printk(", sector=%ld", blk_rq_pos(hd_req));
232 } 244 }
233 printk("\n"); 245 printk("\n");
234 } 246 }
@@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
406 */ 418 */
407static void bad_rw_intr(void) 419static void bad_rw_intr(void)
408{ 420{
409 struct request *req = CURRENT; 421 struct request *req = hd_req;
422
410 if (req != NULL) { 423 if (req != NULL) {
411 struct hd_i_struct *disk = req->rq_disk->private_data; 424 struct hd_i_struct *disk = req->rq_disk->private_data;
412 if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { 425 if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
413 end_request(req, 0); 426 hd_end_request_cur(-EIO);
414 disk->special_op = disk->recalibrate = 1; 427 disk->special_op = disk->recalibrate = 1;
415 } else if (req->errors % RESET_FREQ == 0) 428 } else if (req->errors % RESET_FREQ == 0)
416 reset = 1; 429 reset = 1;
@@ -452,37 +465,30 @@ static void read_intr(void)
452 bad_rw_intr(); 465 bad_rw_intr();
453 hd_request(); 466 hd_request();
454 return; 467 return;
468
455ok_to_read: 469ok_to_read:
456 req = CURRENT; 470 req = hd_req;
457 insw(HD_DATA, req->buffer, 256); 471 insw(HD_DATA, req->buffer, 256);
458 req->sector++;
459 req->buffer += 512;
460 req->errors = 0;
461 i = --req->nr_sectors;
462 --req->current_nr_sectors;
463#ifdef DEBUG 472#ifdef DEBUG
464 printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n", 473 printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
465 req->rq_disk->disk_name, req->sector, req->nr_sectors, 474 req->rq_disk->disk_name, blk_rq_pos(req) + 1,
466 req->buffer+512); 475 blk_rq_sectors(req) - 1, req->buffer+512);
467#endif 476#endif
468 if (req->current_nr_sectors <= 0) 477 if (hd_end_request(0, 512)) {
469 end_request(req, 1);
470 if (i > 0) {
471 SET_HANDLER(&read_intr); 478 SET_HANDLER(&read_intr);
472 return; 479 return;
473 } 480 }
481
474 (void) inb_p(HD_STATUS); 482 (void) inb_p(HD_STATUS);
475#if (HD_DELAY > 0) 483#if (HD_DELAY > 0)
476 last_req = read_timer(); 484 last_req = read_timer();
477#endif 485#endif
478 if (elv_next_request(QUEUE)) 486 hd_request();
479 hd_request();
480 return;
481} 487}
482 488
483static void write_intr(void) 489static void write_intr(void)
484{ 490{
485 struct request *req = CURRENT; 491 struct request *req = hd_req;
486 int i; 492 int i;
487 int retries = 100000; 493 int retries = 100000;
488 494
@@ -492,30 +498,25 @@ static void write_intr(void)
492 continue; 498 continue;
493 if (!OK_STATUS(i)) 499 if (!OK_STATUS(i))
494 break; 500 break;
495 if ((req->nr_sectors <= 1) || (i & DRQ_STAT)) 501 if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
496 goto ok_to_write; 502 goto ok_to_write;
497 } while (--retries > 0); 503 } while (--retries > 0);
498 dump_status("write_intr", i); 504 dump_status("write_intr", i);
499 bad_rw_intr(); 505 bad_rw_intr();
500 hd_request(); 506 hd_request();
501 return; 507 return;
508
502ok_to_write: 509ok_to_write:
503 req->sector++; 510 if (hd_end_request(0, 512)) {
504 i = --req->nr_sectors;
505 --req->current_nr_sectors;
506 req->buffer += 512;
507 if (!i || (req->bio && req->current_nr_sectors <= 0))
508 end_request(req, 1);
509 if (i > 0) {
510 SET_HANDLER(&write_intr); 511 SET_HANDLER(&write_intr);
511 outsw(HD_DATA, req->buffer, 256); 512 outsw(HD_DATA, req->buffer, 256);
512 } else { 513 return;
514 }
515
513#if (HD_DELAY > 0) 516#if (HD_DELAY > 0)
514 last_req = read_timer(); 517 last_req = read_timer();
515#endif 518#endif
516 hd_request(); 519 hd_request();
517 }
518 return;
519} 520}
520 521
521static void recal_intr(void) 522static void recal_intr(void)
@@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
537 538
538 do_hd = NULL; 539 do_hd = NULL;
539 540
540 if (!CURRENT) 541 if (!hd_req)
541 return; 542 return;
542 543
543 spin_lock_irq(hd_queue->queue_lock); 544 spin_lock_irq(hd_queue->queue_lock);
544 reset = 1; 545 reset = 1;
545 name = CURRENT->rq_disk->disk_name; 546 name = hd_req->rq_disk->disk_name;
546 printk("%s: timeout\n", name); 547 printk("%s: timeout\n", name);
547 if (++CURRENT->errors >= MAX_ERRORS) { 548 if (++hd_req->errors >= MAX_ERRORS) {
548#ifdef DEBUG 549#ifdef DEBUG
549 printk("%s: too many errors\n", name); 550 printk("%s: too many errors\n", name);
550#endif 551#endif
551 end_request(CURRENT, 0); 552 hd_end_request_cur(-EIO);
552 } 553 }
553 hd_request(); 554 hd_request();
554 spin_unlock_irq(hd_queue->queue_lock); 555 spin_unlock_irq(hd_queue->queue_lock);
@@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
563 } 564 }
564 if (disk->head > 16) { 565 if (disk->head > 16) {
565 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); 566 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
566 end_request(req, 0); 567 hd_end_request_cur(-EIO);
567 } 568 }
568 disk->special_op = 0; 569 disk->special_op = 0;
569 return 1; 570 return 1;
@@ -590,24 +591,27 @@ static void hd_request(void)
590repeat: 591repeat:
591 del_timer(&device_timer); 592 del_timer(&device_timer);
592 593
593 req = CURRENT; 594 if (!hd_req) {
594 if (!req) { 595 hd_req = blk_fetch_request(hd_queue);
595 do_hd = NULL; 596 if (!hd_req) {
596 return; 597 do_hd = NULL;
598 return;
599 }
597 } 600 }
601 req = hd_req;
598 602
599 if (reset) { 603 if (reset) {
600 reset_hd(); 604 reset_hd();
601 return; 605 return;
602 } 606 }
603 disk = req->rq_disk->private_data; 607 disk = req->rq_disk->private_data;
604 block = req->sector; 608 block = blk_rq_pos(req);
605 nsect = req->nr_sectors; 609 nsect = blk_rq_sectors(req);
606 if (block >= get_capacity(req->rq_disk) || 610 if (block >= get_capacity(req->rq_disk) ||
607 ((block+nsect) > get_capacity(req->rq_disk))) { 611 ((block+nsect) > get_capacity(req->rq_disk))) {
608 printk("%s: bad access: block=%d, count=%d\n", 612 printk("%s: bad access: block=%d, count=%d\n",
609 req->rq_disk->disk_name, block, nsect); 613 req->rq_disk->disk_name, block, nsect);
610 end_request(req, 0); 614 hd_end_request_cur(-EIO);
611 goto repeat; 615 goto repeat;
612 } 616 }
613 617
@@ -647,7 +651,7 @@ repeat:
647 break; 651 break;
648 default: 652 default:
649 printk("unknown hd-command\n"); 653 printk("unknown hd-command\n");
650 end_request(req, 0); 654 hd_end_request_cur(-EIO);
651 break; 655 break;
652 } 656 }
653 } 657 }
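hd.c shows the interrupt-driven variant: each interrupt moves one 512-byte sector, and hd_end_request(0, 512) returns true while the request still has sectors, so the handler re-arms itself; once it returns false the cached request is dropped and the next one is fetched. A sketch of that idiom for a hypothetical PIO driver; the foo_* names are placeholders and the queue lock is assumed held on the completion path.

/* Illustrative sketch only, not part of the patch. */
static struct request *foo_req;		/* in-flight request, owned by the driver */

static void foo_next_request(void);	/* placeholder: kicks the request_fn for new work */

static bool foo_end_request(int err, unsigned int bytes)
{
	/* __blk_end_request() also advances blk_rq_pos() and rq->buffer */
	if (__blk_end_request(foo_req, err, bytes))
		return true;		/* more sectors remain in this request */
	foo_req = NULL;			/* request fully completed */
	return false;
}

static void foo_read_intr(void)
{
	/* ... read one 512-byte sector from the data port into foo_req->buffer ... */

	if (foo_end_request(0, 512)) {
		/* re-arm: the next interrupt continues the same request */
		return;
	}

	foo_next_request();		/* request done, go pick up the next one */
}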
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ddae8082589..801f4ab8330 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,11 +511,7 @@ out:
511 */ 511 */
512static void loop_add_bio(struct loop_device *lo, struct bio *bio) 512static void loop_add_bio(struct loop_device *lo, struct bio *bio)
513{ 513{
514 if (lo->lo_biotail) { 514 bio_list_add(&lo->lo_bio_list, bio);
515 lo->lo_biotail->bi_next = bio;
516 lo->lo_biotail = bio;
517 } else
518 lo->lo_bio = lo->lo_biotail = bio;
519} 515}
520 516
521/* 517/*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
523 */ 519 */
524static struct bio *loop_get_bio(struct loop_device *lo) 520static struct bio *loop_get_bio(struct loop_device *lo)
525{ 521{
526 struct bio *bio; 522 return bio_list_pop(&lo->lo_bio_list);
527
528 if ((bio = lo->lo_bio)) {
529 if (bio == lo->lo_biotail)
530 lo->lo_biotail = NULL;
531 lo->lo_bio = bio->bi_next;
532 bio->bi_next = NULL;
533 }
534
535 return bio;
536} 523}
537 524
538static int loop_make_request(struct request_queue *q, struct bio *old_bio) 525static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
609 596
610 set_user_nice(current, -20); 597 set_user_nice(current, -20);
611 598
612 while (!kthread_should_stop() || lo->lo_bio) { 599 while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
613 600
614 wait_event_interruptible(lo->lo_event, 601 wait_event_interruptible(lo->lo_event,
615 lo->lo_bio || kthread_should_stop()); 602 !bio_list_empty(&lo->lo_bio_list) ||
603 kthread_should_stop());
616 604
617 if (!lo->lo_bio) 605 if (bio_list_empty(&lo->lo_bio_list))
618 continue; 606 continue;
619 spin_lock_irq(&lo->lo_lock); 607 spin_lock_irq(&lo->lo_lock);
620 bio = loop_get_bio(lo); 608 bio = loop_get_bio(lo);
@@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
721 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 709 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
722 goto out_putf; 710 goto out_putf;
723 711
724 /* new backing store needs to support loop (eg splice_read) */
725 if (!inode->i_fop->splice_read)
726 goto out_putf;
727
728 /* size of the new backing store needs to be the same */ 712 /* size of the new backing store needs to be the same */
729 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 713 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
730 goto out_putf; 714 goto out_putf;
@@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
800 error = -EINVAL; 784 error = -EINVAL;
801 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { 785 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
802 const struct address_space_operations *aops = mapping->a_ops; 786 const struct address_space_operations *aops = mapping->a_ops;
803 /* 787
804 * If we can't read - sorry. If we only can't write - well,
805 * it's going to be read-only.
806 */
807 if (!file->f_op->splice_read)
808 goto out_putf;
809 if (aops->write_begin) 788 if (aops->write_begin)
810 lo_flags |= LO_FLAGS_USE_AOPS; 789 lo_flags |= LO_FLAGS_USE_AOPS;
811 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) 790 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
841 lo->old_gfp_mask = mapping_gfp_mask(mapping); 820 lo->old_gfp_mask = mapping_gfp_mask(mapping);
842 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 821 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
843 822
844 lo->lo_bio = lo->lo_biotail = NULL; 823 bio_list_init(&lo->lo_bio_list);
845 824
846 /* 825 /*
847 * set queue make_request_fn, and add limits based on lower level 826 * set queue make_request_fn, and add limits based on lower level
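loop.c replaces its hand-rolled lo_bio/lo_biotail singly linked list with the generic struct bio_list from <linux/bio.h> and drops the splice_read requirement on the backing file. The bio_list calls used here, sketched around an invented foo_dev:

/* Illustrative sketch only, not part of the patch. */
#include <linux/bio.h>
#include <linux/spinlock.h>

struct foo_dev {
	spinlock_t	lock;
	struct bio_list	bios;		/* replaces open-coded head/tail pointers */
};

static void foo_init(struct foo_dev *d)
{
	spin_lock_init(&d->lock);
	bio_list_init(&d->bios);	/* empty list */
}

static void foo_queue_bio(struct foo_dev *d, struct bio *bio)
{
	spin_lock_irq(&d->lock);
	bio_list_add(&d->bios, bio);	/* append at the tail */
	spin_unlock_irq(&d->lock);
}

static struct bio *foo_pop_bio(struct foo_dev *d)
{
	struct bio *bio;

	spin_lock_irq(&d->lock);
	bio = bio_list_pop(&d->bios);	/* NULL when empty; bio_list_empty() also exists */
	spin_unlock_irq(&d->lock);
	return bio;
}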
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f3898353d0a..c0cd0a03f69 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -17,71 +17,220 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/blkdev.h> 18#include <linux/blkdev.h>
19#include <linux/hdreg.h> 19#include <linux/hdreg.h>
20#include <linux/libata.h> 20#include <linux/ata.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/mg_disk.h>
26 25
27#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1) 26#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
28 27
28/* name for block device */
29#define MG_DISK_NAME "mgd"
30/* name for platform device */
31#define MG_DEV_NAME "mg_disk"
32
33#define MG_DISK_MAJ 0
34#define MG_DISK_MAX_PART 16
35#define MG_SECTOR_SIZE 512
36#define MG_MAX_SECTS 256
37
38/* Register offsets */
39#define MG_BUFF_OFFSET 0x8000
40#define MG_STORAGE_BUFFER_SIZE 0x200
41#define MG_REG_OFFSET 0xC000
42#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
43#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
44#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
45#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
46#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
47#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
48#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
49#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
50#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
51#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
52#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
53
54/* handy status */
55#define MG_STAT_READY (ATA_DRDY | ATA_DSC)
56#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
57 ATA_ERR))) == MG_STAT_READY)
58
59/* error code for others */
60#define MG_ERR_NONE 0
61#define MG_ERR_TIMEOUT 0x100
62#define MG_ERR_INIT_STAT 0x101
63#define MG_ERR_TRANSLATION 0x102
64#define MG_ERR_CTRL_RST 0x103
65#define MG_ERR_INV_STAT 0x104
66#define MG_ERR_RSTOUT 0x105
67
68#define MG_MAX_ERRORS 6 /* Max read/write errors */
69
70/* command */
71#define MG_CMD_RD 0x20
72#define MG_CMD_WR 0x30
73#define MG_CMD_SLEEP 0x99
74#define MG_CMD_WAKEUP 0xC3
75#define MG_CMD_ID 0xEC
76#define MG_CMD_WR_CONF 0x3C
77#define MG_CMD_RD_CONF 0x40
78
79/* operation mode */
80#define MG_OP_CASCADE (1 << 0)
81#define MG_OP_CASCADE_SYNC_RD (1 << 1)
82#define MG_OP_CASCADE_SYNC_WR (1 << 2)
83#define MG_OP_INTERLEAVE (1 << 3)
84
85/* synchronous */
86#define MG_BURST_LAT_4 (3 << 4)
87#define MG_BURST_LAT_5 (4 << 4)
88#define MG_BURST_LAT_6 (5 << 4)
89#define MG_BURST_LAT_7 (6 << 4)
90#define MG_BURST_LAT_8 (7 << 4)
91#define MG_BURST_LEN_4 (1 << 1)
92#define MG_BURST_LEN_8 (2 << 1)
93#define MG_BURST_LEN_16 (3 << 1)
94#define MG_BURST_LEN_32 (4 << 1)
95#define MG_BURST_LEN_CONT (0 << 1)
96
97/* timeout value (unit: ms) */
98#define MG_TMAX_CONF_TO_CMD 1
99#define MG_TMAX_WAIT_RD_DRQ 10
100#define MG_TMAX_WAIT_WR_DRQ 500
101#define MG_TMAX_RST_TO_BUSY 10
102#define MG_TMAX_HDRST_TO_RDY 500
103#define MG_TMAX_SWRST_TO_RDY 500
104#define MG_TMAX_RSTOUT 3000
105
106/* device attribution */
107/* use mflash as boot device */
108#define MG_BOOT_DEV (1 << 0)
109/* use mflash as storage device */
110#define MG_STORAGE_DEV (1 << 1)
111/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
112#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
113
114#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
115
116/* names of GPIO resource */
117#define MG_RST_PIN "mg_rst"
118/* except MG_BOOT_DEV, reset-out pin should be assigned */
119#define MG_RSTOUT_PIN "mg_rstout"
120
121/* private driver data */
122struct mg_drv_data {
123 /* disk resource */
124 u32 use_polling;
125
126 /* device attribution */
127 u32 dev_attr;
128
129 /* internally used */
130 struct mg_host *host;
131};
132
133/* main structure for mflash driver */
134struct mg_host {
135 struct device *dev;
136
137 struct request_queue *breq;
138 struct request *req;
139 spinlock_t lock;
140 struct gendisk *gd;
141
142 struct timer_list timer;
143 void (*mg_do_intr) (struct mg_host *);
144
145 u16 id[ATA_ID_WORDS];
146
147 u16 cyls;
148 u16 heads;
149 u16 sectors;
150 u32 n_sectors;
151 u32 nres_sectors;
152
153 void __iomem *dev_base;
154 unsigned int irq;
155 unsigned int rst;
156 unsigned int rstout;
157
158 u32 major;
159 u32 error;
160};
161
162/*
163 * Debugging macro and defines
164 */
165#undef DO_MG_DEBUG
166#ifdef DO_MG_DEBUG
167# define MG_DBG(fmt, args...) \
168 printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
169#else /* CONFIG_MG_DEBUG */
170# define MG_DBG(fmt, args...) do { } while (0)
171#endif /* CONFIG_MG_DEBUG */
172
29static void mg_request(struct request_queue *); 173static void mg_request(struct request_queue *);
30 174
175static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
176{
177 if (__blk_end_request(host->req, err, nr_bytes))
178 return true;
179
180 host->req = NULL;
181 return false;
182}
183
184static bool mg_end_request_cur(struct mg_host *host, int err)
185{
186 return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
187}
188
31static void mg_dump_status(const char *msg, unsigned int stat, 189static void mg_dump_status(const char *msg, unsigned int stat,
32 struct mg_host *host) 190 struct mg_host *host)
33{ 191{
34 char *name = MG_DISK_NAME; 192 char *name = MG_DISK_NAME;
35 struct request *req;
36 193
37 if (host->breq) { 194 if (host->req)
38 req = elv_next_request(host->breq); 195 name = host->req->rq_disk->disk_name;
39 if (req)
40 name = req->rq_disk->disk_name;
41 }
42 196
43 printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff); 197 printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
44 if (stat & MG_REG_STATUS_BIT_BUSY) 198 if (stat & ATA_BUSY)
45 printk("Busy "); 199 printk("Busy ");
46 if (stat & MG_REG_STATUS_BIT_READY) 200 if (stat & ATA_DRDY)
47 printk("DriveReady "); 201 printk("DriveReady ");
48 if (stat & MG_REG_STATUS_BIT_WRITE_FAULT) 202 if (stat & ATA_DF)
49 printk("WriteFault "); 203 printk("WriteFault ");
50 if (stat & MG_REG_STATUS_BIT_SEEK_DONE) 204 if (stat & ATA_DSC)
51 printk("SeekComplete "); 205 printk("SeekComplete ");
52 if (stat & MG_REG_STATUS_BIT_DATA_REQ) 206 if (stat & ATA_DRQ)
53 printk("DataRequest "); 207 printk("DataRequest ");
54 if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR) 208 if (stat & ATA_CORR)
55 printk("CorrectedError "); 209 printk("CorrectedError ");
56 if (stat & MG_REG_STATUS_BIT_ERROR) 210 if (stat & ATA_ERR)
57 printk("Error "); 211 printk("Error ");
58 printk("}\n"); 212 printk("}\n");
59 if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) { 213 if ((stat & ATA_ERR) == 0) {
60 host->error = 0; 214 host->error = 0;
61 } else { 215 } else {
62 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR); 216 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
63 printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg, 217 printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
64 host->error & 0xff); 218 host->error & 0xff);
65 if (host->error & MG_REG_ERR_BBK) 219 if (host->error & ATA_BBK)
66 printk("BadSector "); 220 printk("BadSector ");
67 if (host->error & MG_REG_ERR_UNC) 221 if (host->error & ATA_UNC)
68 printk("UncorrectableError "); 222 printk("UncorrectableError ");
69 if (host->error & MG_REG_ERR_IDNF) 223 if (host->error & ATA_IDNF)
70 printk("SectorIdNotFound "); 224 printk("SectorIdNotFound ");
71 if (host->error & MG_REG_ERR_ABRT) 225 if (host->error & ATA_ABORTED)
72 printk("DriveStatusError "); 226 printk("DriveStatusError ");
73 if (host->error & MG_REG_ERR_AMNF) 227 if (host->error & ATA_AMNF)
74 printk("AddrMarkNotFound "); 228 printk("AddrMarkNotFound ");
75 printk("}"); 229 printk("}");
76 if (host->error & 230 if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
77 (MG_REG_ERR_BBK | MG_REG_ERR_UNC | 231 if (host->req)
78 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) { 232 printk(", sector=%u",
79 if (host->breq) { 233 (unsigned int)blk_rq_pos(host->req));
80 req = elv_next_request(host->breq);
81 if (req)
82 printk(", sector=%u", (u32)req->sector);
83 }
84
85 } 234 }
86 printk("\n"); 235 printk("\n");
87 } 236 }
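mg_disk also drops its private MG_REG_STATUS_BIT_* and MG_REG_ERR_* names in favour of the generic bits from <linux/ata.h> (ATA_BUSY, ATA_DRDY, ATA_DRQ, ATA_ERR and friends), which is why the status and error dumps above read straight off the ATA defines. A small status-poll sketch using those bits; foo_wait_for_drq and the readb() of a status register are illustrative only.

/* Illustrative sketch only, not part of the patch. */
#include <linux/ata.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int foo_wait_for_drq(void __iomem *status_reg, unsigned long timeout)
{
	unsigned long expire = jiffies + timeout;

	do {
		u8 status = readb(status_reg);	/* placeholder register read */

		if (status & ATA_BUSY)
			continue;		/* device still busy, keep polling */
		if (status & ATA_ERR)
			return -EIO;		/* error bit set */
		if (status & ATA_DRQ)
			return 0;		/* ready to transfer data */
	} while (time_before(jiffies, expire));

	return -ETIMEDOUT;
}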
@@ -100,12 +249,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
100 249
101 do { 250 do {
102 cur_jiffies = jiffies; 251 cur_jiffies = jiffies;
103 if (status & MG_REG_STATUS_BIT_BUSY) { 252 if (status & ATA_BUSY) {
104 if (expect == MG_REG_STATUS_BIT_BUSY) 253 if (expect == ATA_BUSY)
105 break; 254 break;
106 } else { 255 } else {
107 /* Check the error condition! */ 256 /* Check the error condition! */
108 if (status & MG_REG_STATUS_BIT_ERROR) { 257 if (status & ATA_ERR) {
109 mg_dump_status("mg_wait", status, host); 258 mg_dump_status("mg_wait", status, host);
110 break; 259 break;
111 } 260 }
@@ -114,8 +263,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
114 if (MG_READY_OK(status)) 263 if (MG_READY_OK(status))
115 break; 264 break;
116 265
117 if (expect == MG_REG_STATUS_BIT_DATA_REQ) 266 if (expect == ATA_DRQ)
118 if (status & MG_REG_STATUS_BIT_DATA_REQ) 267 if (status & ATA_DRQ)
119 break; 268 break;
120 } 269 }
121 if (!msec) { 270 if (!msec) {
@@ -173,6 +322,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
173 return IRQ_HANDLED; 322 return IRQ_HANDLED;
174} 323}
175 324
325/* local copy of ata_id_string() */
326static void mg_id_string(const u16 *id, unsigned char *s,
327 unsigned int ofs, unsigned int len)
328{
329 unsigned int c;
330
331 BUG_ON(len & 1);
332
333 while (len > 0) {
334 c = id[ofs] >> 8;
335 *s = c;
336 s++;
337
338 c = id[ofs] & 0xff;
339 *s = c;
340 s++;
341
342 ofs++;
343 len -= 2;
344 }
345}
346
347/* local copy of ata_id_c_string() */
348static void mg_id_c_string(const u16 *id, unsigned char *s,
349 unsigned int ofs, unsigned int len)
350{
351 unsigned char *p;
352
353 mg_id_string(id, s, ofs, len - 1);
354
355 p = s + strnlen(s, len - 1);
356 while (p > s && p[-1] == ' ')
357 p--;
358 *p = '\0';
359}
360
176static int mg_get_disk_id(struct mg_host *host) 361static int mg_get_disk_id(struct mg_host *host)
177{ 362{
178 u32 i; 363 u32 i;
@@ -184,12 +369,10 @@ static int mg_get_disk_id(struct mg_host *host)
184 char serial[ATA_ID_SERNO_LEN + 1]; 369 char serial[ATA_ID_SERNO_LEN + 1];
185 370
186 if (!prv_data->use_polling) 371 if (!prv_data->use_polling)
187 outb(MG_REG_CTRL_INTR_DISABLE, 372 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
188 (unsigned long)host->dev_base +
189 MG_REG_DRV_CTRL);
190 373
191 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); 374 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
192 err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ); 375 err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
193 if (err) 376 if (err)
194 return err; 377 return err;
195 378
@@ -219,9 +402,9 @@ static int mg_get_disk_id(struct mg_host *host)
219 host->n_sectors -= host->nres_sectors; 402 host->n_sectors -= host->nres_sectors;
220 } 403 }
221 404
222 ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); 405 mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
223 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); 406 mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
224 ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); 407 mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
225 printk(KERN_INFO "mg_disk: model: %s\n", model); 408 printk(KERN_INFO "mg_disk: model: %s\n", model);
226 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev); 409 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
227 printk(KERN_INFO "mg_disk: serial: %s\n", serial); 410 printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@@ -229,8 +412,7 @@ static int mg_get_disk_id(struct mg_host *host)
229 host->n_sectors, host->nres_sectors); 412 host->n_sectors, host->nres_sectors);
230 413
231 if (!prv_data->use_polling) 414 if (!prv_data->use_polling)
232 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 415 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
233 MG_REG_DRV_CTRL);
234 416
235 return err; 417 return err;
236} 418}
@@ -244,7 +426,7 @@ static int mg_disk_init(struct mg_host *host)
244 426
245 /* hdd rst low */ 427 /* hdd rst low */
246 gpio_set_value(host->rst, 0); 428 gpio_set_value(host->rst, 0);
247 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 429 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
248 if (err) 430 if (err)
249 return err; 431 return err;
250 432
@@ -255,17 +437,14 @@ static int mg_disk_init(struct mg_host *host)
255 return err; 437 return err;
256 438
257 /* soft reset on */ 439 /* soft reset on */
258 outb(MG_REG_CTRL_RESET | 440 outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
259 (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
260 MG_REG_CTRL_INTR_ENABLE),
261 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 441 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
262 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 442 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
263 if (err) 443 if (err)
264 return err; 444 return err;
265 445
266 /* soft reset off */ 446 /* soft reset off */
267 outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE : 447 outb(prv_data->use_polling ? ATA_NIEN : 0,
268 MG_REG_CTRL_INTR_ENABLE,
269 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 448 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
270 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); 449 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
271 if (err) 450 if (err)
@@ -281,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
281 460
282static void mg_bad_rw_intr(struct mg_host *host) 461static void mg_bad_rw_intr(struct mg_host *host)
283{ 462{
284 struct request *req = elv_next_request(host->breq); 463 if (host->req)
285 if (req != NULL) 464 if (++host->req->errors >= MG_MAX_ERRORS ||
286 if (++req->errors >= MG_MAX_ERRORS || 465 host->error == MG_ERR_TIMEOUT)
287 host->error == MG_ERR_TIMEOUT) 466 mg_end_request_cur(host, -EIO);
288 end_request(req, 0);
289} 467}
290 468
291static unsigned int mg_out(struct mg_host *host, 469static unsigned int mg_out(struct mg_host *host,
@@ -311,7 +489,7 @@ static unsigned int mg_out(struct mg_host *host,
311 MG_REG_CYL_LOW); 489 MG_REG_CYL_LOW);
312 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + 490 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
313 MG_REG_CYL_HIGH); 491 MG_REG_CYL_HIGH);
314 outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE), 492 outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
315 (unsigned long)host->dev_base + MG_REG_DRV_HEAD); 493 (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
316 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND); 494 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
317 return MG_ERR_NONE; 495 return MG_ERR_NONE;
@@ -319,105 +497,77 @@ static unsigned int mg_out(struct mg_host *host,
319 497
320static void mg_read(struct request *req) 498static void mg_read(struct request *req)
321{ 499{
322 u32 remains, j; 500 u32 j;
323 struct mg_host *host = req->rq_disk->private_data; 501 struct mg_host *host = req->rq_disk->private_data;
324 502
325 remains = req->nr_sectors; 503 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
326 504 MG_CMD_RD, NULL) != MG_ERR_NONE)
327 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
328 MG_ERR_NONE)
329 mg_bad_rw_intr(host); 505 mg_bad_rw_intr(host);
330 506
331 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 507 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
332 remains, req->sector, req->buffer); 508 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
509
510 do {
511 u16 *buff = (u16 *)req->buffer;
333 512
334 while (remains) { 513 if (mg_wait(host, ATA_DRQ,
335 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 514 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
336 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
337 mg_bad_rw_intr(host); 515 mg_bad_rw_intr(host);
338 return; 516 return;
339 } 517 }
340 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 518 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
341 *(u16 *)req->buffer = 519 *buff++ = inw((unsigned long)host->dev_base +
342 inw((unsigned long)host->dev_base + 520 MG_BUFF_OFFSET + (j << 1));
343 MG_BUFF_OFFSET + (j << 1));
344 req->buffer += 2;
345 }
346
347 req->sector++;
348 req->errors = 0;
349 remains = --req->nr_sectors;
350 --req->current_nr_sectors;
351
352 if (req->current_nr_sectors <= 0) {
353 MG_DBG("remain : %d sects\n", remains);
354 end_request(req, 1);
355 if (remains > 0)
356 req = elv_next_request(host->breq);
357 }
358 521
359 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + 522 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
360 MG_REG_COMMAND); 523 MG_REG_COMMAND);
361 } 524 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
362} 525}
363 526
364static void mg_write(struct request *req) 527static void mg_write(struct request *req)
365{ 528{
366 u32 remains, j; 529 u32 j;
367 struct mg_host *host = req->rq_disk->private_data; 530 struct mg_host *host = req->rq_disk->private_data;
368 531
369 remains = req->nr_sectors; 532 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
370 533 MG_CMD_WR, NULL) != MG_ERR_NONE) {
371 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
372 MG_ERR_NONE) {
373 mg_bad_rw_intr(host); 534 mg_bad_rw_intr(host);
374 return; 535 return;
375 } 536 }
376 537
377
378 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 538 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
379 remains, req->sector, req->buffer); 539 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
380 while (remains) { 540
381 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 541 do {
382 MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { 542 u16 *buff = (u16 *)req->buffer;
543
544 if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
383 mg_bad_rw_intr(host); 545 mg_bad_rw_intr(host);
384 return; 546 return;
385 } 547 }
386 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 548 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
387 outw(*(u16 *)req->buffer, 549 outw(*buff++, (unsigned long)host->dev_base +
388 (unsigned long)host->dev_base + 550 MG_BUFF_OFFSET + (j << 1));
389 MG_BUFF_OFFSET + (j << 1));
390 req->buffer += 2;
391 }
392 req->sector++;
393 remains = --req->nr_sectors;
394 --req->current_nr_sectors;
395
396 if (req->current_nr_sectors <= 0) {
397 MG_DBG("remain : %d sects\n", remains);
398 end_request(req, 1);
399 if (remains > 0)
400 req = elv_next_request(host->breq);
401 }
402 551
403 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 552 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
404 MG_REG_COMMAND); 553 MG_REG_COMMAND);
405 } 554 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
406} 555}
407 556
408static void mg_read_intr(struct mg_host *host) 557static void mg_read_intr(struct mg_host *host)
409{ 558{
559 struct request *req = host->req;
410 u32 i; 560 u32 i;
411 struct request *req; 561 u16 *buff;
412 562
413 /* check status */ 563 /* check status */
414 do { 564 do {
415 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 565 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
416 if (i & MG_REG_STATUS_BIT_BUSY) 566 if (i & ATA_BUSY)
417 break; 567 break;
418 if (!MG_READY_OK(i)) 568 if (!MG_READY_OK(i))
419 break; 569 break;
420 if (i & MG_REG_STATUS_BIT_DATA_REQ) 570 if (i & ATA_DRQ)
421 goto ok_to_read; 571 goto ok_to_read;
422 } while (0); 572 } while (0);
423 mg_dump_status("mg_read_intr", i, host); 573 mg_dump_status("mg_read_intr", i, host);
@@ -427,60 +577,42 @@ static void mg_read_intr(struct mg_host *host)
427 577
428ok_to_read: 578ok_to_read:
429 /* get current segment of request */ 579 /* get current segment of request */
430 req = elv_next_request(host->breq); 580 buff = (u16 *)req->buffer;
431 581
432 /* read 1 sector */ 582 /* read 1 sector */
433 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) { 583 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
434 *(u16 *)req->buffer = 584 *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
435 inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + 585 (i << 1));
436 (i << 1));
437 req->buffer += 2;
438 }
439 586
440 /* manipulate request */
441 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 587 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
442 req->sector, req->nr_sectors - 1, req->buffer); 588 blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
443
444 req->sector++;
445 req->errors = 0;
446 i = --req->nr_sectors;
447 --req->current_nr_sectors;
448
449 /* let know if current segment done */
450 if (req->current_nr_sectors <= 0)
451 end_request(req, 1);
452
453 /* set handler if read remains */
454 if (i > 0) {
455 host->mg_do_intr = mg_read_intr;
456 mod_timer(&host->timer, jiffies + 3 * HZ);
457 }
458 589
459 /* send read confirm */ 590 /* send read confirm */
460 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 591 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
461 592
462 /* goto next request */ 593 if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
463 if (!i) 594 /* set handler if read remains */
595 host->mg_do_intr = mg_read_intr;
596 mod_timer(&host->timer, jiffies + 3 * HZ);
597 } else /* goto next request */
464 mg_request(host->breq); 598 mg_request(host->breq);
465} 599}
466 600
467static void mg_write_intr(struct mg_host *host) 601static void mg_write_intr(struct mg_host *host)
468{ 602{
603 struct request *req = host->req;
469 u32 i, j; 604 u32 i, j;
470 u16 *buff; 605 u16 *buff;
471 struct request *req; 606 bool rem;
472
473 /* get current segment of request */
474 req = elv_next_request(host->breq);
475 607
476 /* check status */ 608 /* check status */
477 do { 609 do {
478 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 610 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
479 if (i & MG_REG_STATUS_BIT_BUSY) 611 if (i & ATA_BUSY)
480 break; 612 break;
481 if (!MG_READY_OK(i)) 613 if (!MG_READY_OK(i))
482 break; 614 break;
483 if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ)) 615 if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
484 goto ok_to_write; 616 goto ok_to_write;
485 } while (0); 617 } while (0);
486 mg_dump_status("mg_write_intr", i, host); 618 mg_dump_status("mg_write_intr", i, host);
@@ -489,18 +621,8 @@ static void mg_write_intr(struct mg_host *host)
489 return; 621 return;
490 622
491ok_to_write: 623ok_to_write:
492 /* manipulate request */ 624 if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
493 req->sector++; 625 /* write 1 sector and set handler if remains */
494 i = --req->nr_sectors;
495 --req->current_nr_sectors;
496 req->buffer += MG_SECTOR_SIZE;
497
498 /* let know if current segment or all done */
499 if (!i || (req->bio && req->current_nr_sectors <= 0))
500 end_request(req, 1);
501
502 /* write 1 sector and set handler if remains */
503 if (i > 0) {
504 buff = (u16 *)req->buffer; 626 buff = (u16 *)req->buffer;
505 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) { 627 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
506 outw(*buff, (unsigned long)host->dev_base + 628 outw(*buff, (unsigned long)host->dev_base +
@@ -508,7 +630,7 @@ ok_to_write:
508 buff++; 630 buff++;
509 } 631 }
510 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 632 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
511 req->sector, req->nr_sectors, req->buffer); 633 blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
512 host->mg_do_intr = mg_write_intr; 634 host->mg_do_intr = mg_write_intr;
513 mod_timer(&host->timer, jiffies + 3 * HZ); 635 mod_timer(&host->timer, jiffies + 3 * HZ);
514 } 636 }
@@ -516,7 +638,7 @@ ok_to_write:
516 /* send write confirm */ 638 /* send write confirm */
517 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 639 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
518 640
519 if (!i) 641 if (!rem)
520 mg_request(host->breq); 642 mg_request(host->breq);
521} 643}
522 644
@@ -524,49 +646,45 @@ void mg_times_out(unsigned long data)
524{ 646{
525 struct mg_host *host = (struct mg_host *)data; 647 struct mg_host *host = (struct mg_host *)data;
526 char *name; 648 char *name;
527 struct request *req;
528 649
529 spin_lock_irq(&host->lock); 650 spin_lock_irq(&host->lock);
530 651
531 req = elv_next_request(host->breq); 652 if (!host->req)
532 if (!req)
533 goto out_unlock; 653 goto out_unlock;
534 654
535 host->mg_do_intr = NULL; 655 host->mg_do_intr = NULL;
536 656
537 name = req->rq_disk->disk_name; 657 name = host->req->rq_disk->disk_name;
538 printk(KERN_DEBUG "%s: timeout\n", name); 658 printk(KERN_DEBUG "%s: timeout\n", name);
539 659
540 host->error = MG_ERR_TIMEOUT; 660 host->error = MG_ERR_TIMEOUT;
541 mg_bad_rw_intr(host); 661 mg_bad_rw_intr(host);
542 662
543 mg_request(host->breq);
544out_unlock: 663out_unlock:
664 mg_request(host->breq);
545 spin_unlock_irq(&host->lock); 665 spin_unlock_irq(&host->lock);
546} 666}
547 667
548static void mg_request_poll(struct request_queue *q) 668static void mg_request_poll(struct request_queue *q)
549{ 669{
550 struct request *req; 670 struct mg_host *host = q->queuedata;
551 struct mg_host *host;
552 671
553 while ((req = elv_next_request(q)) != NULL) { 672 while (1) {
554 host = req->rq_disk->private_data; 673 if (!host->req) {
555 if (blk_fs_request(req)) { 674 host->req = blk_fetch_request(q);
556 switch (rq_data_dir(req)) { 675 if (!host->req)
557 case READ:
558 mg_read(req);
559 break;
560 case WRITE:
561 mg_write(req);
562 break;
563 default:
564 printk(KERN_WARNING "%s:%d unknown command\n",
565 __func__, __LINE__);
566 end_request(req, 0);
567 break; 676 break;
568 }
569 } 677 }
678
679 if (unlikely(!blk_fs_request(host->req))) {
680 mg_end_request_cur(host, -EIO);
681 continue;
682 }
683
684 if (rq_data_dir(host->req) == READ)
685 mg_read(host->req);
686 else
687 mg_write(host->req);
570 } 688 }
571} 689}
572 690
@@ -588,18 +706,15 @@ static unsigned int mg_issue_req(struct request *req,
588 break; 706 break;
589 case WRITE: 707 case WRITE:
590 /* TODO : handler */ 708 /* TODO : handler */
591 outb(MG_REG_CTRL_INTR_DISABLE, 709 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
592 (unsigned long)host->dev_base +
593 MG_REG_DRV_CTRL);
594 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) 710 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
595 != MG_ERR_NONE) { 711 != MG_ERR_NONE) {
596 mg_bad_rw_intr(host); 712 mg_bad_rw_intr(host);
597 return host->error; 713 return host->error;
598 } 714 }
599 del_timer(&host->timer); 715 del_timer(&host->timer);
600 mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ); 716 mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
601 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 717 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
602 MG_REG_DRV_CTRL);
603 if (host->error) { 718 if (host->error) {
604 mg_bad_rw_intr(host); 719 mg_bad_rw_intr(host);
605 return host->error; 720 return host->error;
@@ -614,11 +729,6 @@ static unsigned int mg_issue_req(struct request *req,
614 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 729 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
615 MG_REG_COMMAND); 730 MG_REG_COMMAND);
616 break; 731 break;
617 default:
618 printk(KERN_WARNING "%s:%d unknown command\n",
619 __func__, __LINE__);
620 end_request(req, 0);
621 break;
622 } 732 }
623 return MG_ERR_NONE; 733 return MG_ERR_NONE;
624} 734}
@@ -626,16 +736,17 @@ static unsigned int mg_issue_req(struct request *req,
626/* This function also called from IRQ context */ 736/* This function also called from IRQ context */
627static void mg_request(struct request_queue *q) 737static void mg_request(struct request_queue *q)
628{ 738{
739 struct mg_host *host = q->queuedata;
629 struct request *req; 740 struct request *req;
630 struct mg_host *host;
631 u32 sect_num, sect_cnt; 741 u32 sect_num, sect_cnt;
632 742
633 while (1) { 743 while (1) {
634 req = elv_next_request(q); 744 if (!host->req) {
635 if (!req) 745 host->req = blk_fetch_request(q);
636 return; 746 if (!host->req)
637 747 break;
638 host = req->rq_disk->private_data; 748 }
749 req = host->req;
639 750
640 /* check unwanted request call */ 751 /* check unwanted request call */
641 if (host->mg_do_intr) 752 if (host->mg_do_intr)
@@ -643,9 +754,9 @@ static void mg_request(struct request_queue *q)
643 754
644 del_timer(&host->timer); 755 del_timer(&host->timer);
645 756
646 sect_num = req->sector; 757 sect_num = blk_rq_pos(req);
647 /* deal whole segments */ 758 /* deal whole segments */
648 sect_cnt = req->nr_sectors; 759 sect_cnt = blk_rq_sectors(req);
649 760
650 /* sanity check */ 761 /* sanity check */
651 if (sect_num >= get_capacity(req->rq_disk) || 762 if (sect_num >= get_capacity(req->rq_disk) ||
@@ -655,12 +766,14 @@ static void mg_request(struct request_queue *q)
655 "%s: bad access: sector=%d, count=%d\n", 766 "%s: bad access: sector=%d, count=%d\n",
656 req->rq_disk->disk_name, 767 req->rq_disk->disk_name,
657 sect_num, sect_cnt); 768 sect_num, sect_cnt);
658 end_request(req, 0); 769 mg_end_request_cur(host, -EIO);
659 continue; 770 continue;
660 } 771 }
661 772
662 if (!blk_fs_request(req)) 773 if (unlikely(!blk_fs_request(req))) {
663 return; 774 mg_end_request_cur(host, -EIO);
775 continue;
776 }
664 777
665 if (!mg_issue_req(req, host, sect_num, sect_cnt)) 778 if (!mg_issue_req(req, host, sect_num, sect_cnt))
666 return; 779 return;
@@ -690,9 +803,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
690 return -EIO; 803 return -EIO;
691 804
692 if (!prv_data->use_polling) 805 if (!prv_data->use_polling)
693 outb(MG_REG_CTRL_INTR_DISABLE, 806 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
694 (unsigned long)host->dev_base +
695 MG_REG_DRV_CTRL);
696 807
697 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); 808 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
698 /* wait until mflash deep sleep */ 809 /* wait until mflash deep sleep */
@@ -700,9 +811,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
700 811
701 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { 812 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
702 if (!prv_data->use_polling) 813 if (!prv_data->use_polling)
703 outb(MG_REG_CTRL_INTR_ENABLE, 814 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
704 (unsigned long)host->dev_base +
705 MG_REG_DRV_CTRL);
706 return -EIO; 815 return -EIO;
707 } 816 }
708 817
@@ -725,8 +834,7 @@ static int mg_resume(struct platform_device *plat_dev)
725 return -EIO; 834 return -EIO;
726 835
727 if (!prv_data->use_polling) 836 if (!prv_data->use_polling)
728 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 837 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
729 MG_REG_DRV_CTRL);
730 838
731 return 0; 839 return 0;
732} 840}
@@ -877,6 +985,7 @@ static int mg_probe(struct platform_device *plat_dev)
877 __func__, __LINE__); 985 __func__, __LINE__);
878 goto probe_err_5; 986 goto probe_err_5;
879 } 987 }
988 host->breq->queuedata = host;
880 989
881 /* mflash is random device, thanx for the noop */ 990 /* mflash is random device, thanx for the noop */
882 elevator_exit(host->breq->elevator); 991 elevator_exit(host->breq->elevator);
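
The mg_disk hunks above establish the pattern the rest of this series repeats: elv_next_request()/end_request() give way to blk_fetch_request() and the __blk_end_request_*() helpers, and direct field access (req->sector, req->nr_sectors) is replaced by the blk_rq_pos()/blk_rq_sectors() accessors. Below is a minimal sketch of a request function in the post-conversion style, using only the block-layer calls visible in the hunks; the "mydrv_" names are hypothetical stand-ins for a driver's own code, not part of this patch.

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Illustrative request_fn in the post-conversion style. */
static void mydrv_request_fn(struct request_queue *q)
{
	struct request *req;

	/* blk_fetch_request() peeks at and dequeues the next request. */
	while ((req = blk_fetch_request(q)) != NULL) {
		int err = 0;

		if (!blk_fs_request(req)) {
			/* non-fs requests are failed as a whole */
			__blk_end_request_all(req, -EIO);
			continue;
		}

		/* range check via accessors instead of req->sector etc. */
		if (blk_rq_pos(req) + blk_rq_sectors(req) >
		    get_capacity(req->rq_disk))
			err = -EIO;

		/* ... a real driver would transfer the data here ... */

		/* complete the whole request; 0 = success, -errno = failure */
		__blk_end_request_all(req, err);
	}
}
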
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d6de4f15cc..5d23ffad7c7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
110 req, error ? "failed" : "done"); 110 req, error ? "failed" : "done");
111 111
112 spin_lock_irqsave(q->queue_lock, flags); 112 spin_lock_irqsave(q->queue_lock, flags);
113 __blk_end_request(req, error, req->nr_sectors << 9); 113 __blk_end_request_all(req, error);
114 spin_unlock_irqrestore(q->queue_lock, flags); 114 spin_unlock_irqrestore(q->queue_lock, flags);
115} 115}
116 116
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
231{ 231{
232 int result, flags; 232 int result, flags;
233 struct nbd_request request; 233 struct nbd_request request;
234 unsigned long size = req->nr_sectors << 9; 234 unsigned long size = blk_rq_bytes(req);
235 235
236 request.magic = htonl(NBD_REQUEST_MAGIC); 236 request.magic = htonl(NBD_REQUEST_MAGIC);
237 request.type = htonl(nbd_cmd(req)); 237 request.type = htonl(nbd_cmd(req));
238 request.from = cpu_to_be64((u64) req->sector << 9); 238 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
239 request.len = htonl(size); 239 request.len = htonl(size);
240 memcpy(request.handle, &req, sizeof(req)); 240 memcpy(request.handle, &req, sizeof(req));
241 241
242 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", 242 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
243 lo->disk->disk_name, req, 243 lo->disk->disk_name, req,
244 nbdcmd_to_ascii(nbd_cmd(req)), 244 nbdcmd_to_ascii(nbd_cmd(req)),
245 (unsigned long long)req->sector << 9, 245 (unsigned long long)blk_rq_pos(req) << 9,
246 req->nr_sectors << 9); 246 blk_rq_bytes(req));
247 result = sock_xmit(lo, 1, &request, sizeof(request), 247 result = sock_xmit(lo, 1, &request, sizeof(request),
248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); 248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
249 if (result <= 0) { 249 if (result <= 0) {
@@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
533{ 533{
534 struct request *req; 534 struct request *req;
535 535
536 while ((req = elv_next_request(q)) != NULL) { 536 while ((req = blk_fetch_request(q)) != NULL) {
537 struct nbd_device *lo; 537 struct nbd_device *lo;
538 538
539 blkdev_dequeue_request(req);
540
541 spin_unlock_irq(q->queue_lock); 539 spin_unlock_irq(q->queue_lock);
542 540
543 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", 541 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
580 blk_rq_init(NULL, &sreq); 578 blk_rq_init(NULL, &sreq);
581 sreq.cmd_type = REQ_TYPE_SPECIAL; 579 sreq.cmd_type = REQ_TYPE_SPECIAL;
582 nbd_cmd(&sreq) = NBD_CMD_DISC; 580 nbd_cmd(&sreq) = NBD_CMD_DISC;
583 /*
584 * Set these to sane values in case server implementation
585 * fails to check the request type first and also to keep
586 * debugging output cleaner.
587 */
588 sreq.sector = 0;
589 sreq.nr_sectors = 0;
590 if (!lo->sock) 581 if (!lo->sock)
591 return -EINVAL; 582 return -EINVAL;
592 nbd_send_req(lo, &sreq); 583 nbd_send_req(lo, &sreq);
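
The nbd hunk above is a compact example of the accessor swap on the command-building side: byte offsets and lengths that used to be computed by shifting req->sector and req->nr_sectors now come from blk_rq_pos() and blk_rq_bytes(). A sketch of that step under the same assumptions; struct wire_hdr and fill_wire_hdr() are hypothetical, loosely modelled on struct nbd_request.

#include <linux/blkdev.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical on-the-wire header, modelled on struct nbd_request. */
struct wire_hdr {
	__be64 from;	/* starting byte offset on the device */
	__be32 len;	/* transfer length in bytes */
};

static void fill_wire_hdr(struct wire_hdr *hdr, struct request *req)
{
	/* was: cpu_to_be64((u64)req->sector << 9) */
	hdr->from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
	/* was: htonl(req->nr_sectors << 9) */
	hdr->len = cpu_to_be32(blk_rq_bytes(req));
}
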
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e91d4b4b014..911dfd98d81 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
719 if (pcd_busy) 719 if (pcd_busy)
720 return; 720 return;
721 while (1) { 721 while (1) {
722 pcd_req = elv_next_request(q); 722 if (!pcd_req) {
723 if (!pcd_req) 723 pcd_req = blk_fetch_request(q);
724 return; 724 if (!pcd_req)
725 return;
726 }
725 727
726 if (rq_data_dir(pcd_req) == READ) { 728 if (rq_data_dir(pcd_req) == READ) {
727 struct pcd_unit *cd = pcd_req->rq_disk->private_data; 729 struct pcd_unit *cd = pcd_req->rq_disk->private_data;
728 if (cd != pcd_current) 730 if (cd != pcd_current)
729 pcd_bufblk = -1; 731 pcd_bufblk = -1;
730 pcd_current = cd; 732 pcd_current = cd;
731 pcd_sector = pcd_req->sector; 733 pcd_sector = blk_rq_pos(pcd_req);
732 pcd_count = pcd_req->current_nr_sectors; 734 pcd_count = blk_rq_cur_sectors(pcd_req);
733 pcd_buf = pcd_req->buffer; 735 pcd_buf = pcd_req->buffer;
734 pcd_busy = 1; 736 pcd_busy = 1;
735 ps_set_intr(do_pcd_read, NULL, 0, nice); 737 ps_set_intr(do_pcd_read, NULL, 0, nice);
736 return; 738 return;
737 } else 739 } else {
738 end_request(pcd_req, 0); 740 __blk_end_request_all(pcd_req, -EIO);
741 pcd_req = NULL;
742 }
739 } 743 }
740} 744}
741 745
742static inline void next_request(int success) 746static inline void next_request(int err)
743{ 747{
744 unsigned long saved_flags; 748 unsigned long saved_flags;
745 749
746 spin_lock_irqsave(&pcd_lock, saved_flags); 750 spin_lock_irqsave(&pcd_lock, saved_flags);
747 end_request(pcd_req, success); 751 if (!__blk_end_request_cur(pcd_req, err))
752 pcd_req = NULL;
748 pcd_busy = 0; 753 pcd_busy = 0;
749 do_pcd_request(pcd_queue); 754 do_pcd_request(pcd_queue);
750 spin_unlock_irqrestore(&pcd_lock, saved_flags); 755 spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -781,7 +786,7 @@ static void pcd_start(void)
781 786
782 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { 787 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
783 pcd_bufblk = -1; 788 pcd_bufblk = -1;
784 next_request(0); 789 next_request(-EIO);
785 return; 790 return;
786 } 791 }
787 792
@@ -796,7 +801,7 @@ static void do_pcd_read(void)
796 pcd_retries = 0; 801 pcd_retries = 0;
797 pcd_transfer(); 802 pcd_transfer();
798 if (!pcd_count) { 803 if (!pcd_count) {
799 next_request(1); 804 next_request(0);
800 return; 805 return;
801 } 806 }
802 807
@@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
815 return; 820 return;
816 } 821 }
817 pcd_bufblk = -1; 822 pcd_bufblk = -1;
818 next_request(0); 823 next_request(-EIO);
819 return; 824 return;
820 } 825 }
821 826
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9299455b0af..bf5955b3d87 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -410,10 +410,12 @@ static void run_fsm(void)
410 pd_claimed = 0; 410 pd_claimed = 0;
411 phase = NULL; 411 phase = NULL;
412 spin_lock_irqsave(&pd_lock, saved_flags); 412 spin_lock_irqsave(&pd_lock, saved_flags);
413 end_request(pd_req, res); 413 if (!__blk_end_request_cur(pd_req,
414 pd_req = elv_next_request(pd_queue); 414 res == Ok ? 0 : -EIO)) {
415 if (!pd_req) 415 pd_req = blk_fetch_request(pd_queue);
416 stop = 1; 416 if (!pd_req)
417 stop = 1;
418 }
417 spin_unlock_irqrestore(&pd_lock, saved_flags); 419 spin_unlock_irqrestore(&pd_lock, saved_flags);
418 if (stop) 420 if (stop)
419 return; 421 return;
@@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
443 445
444 pd_cmd = rq_data_dir(pd_req); 446 pd_cmd = rq_data_dir(pd_req);
445 if (pd_cmd == READ || pd_cmd == WRITE) { 447 if (pd_cmd == READ || pd_cmd == WRITE) {
446 pd_block = pd_req->sector; 448 pd_block = blk_rq_pos(pd_req);
447 pd_count = pd_req->current_nr_sectors; 449 pd_count = blk_rq_cur_sectors(pd_req);
448 if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) 450 if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
449 return Fail; 451 return Fail;
450 pd_run = pd_req->nr_sectors; 452 pd_run = blk_rq_sectors(pd_req);
451 pd_buf = pd_req->buffer; 453 pd_buf = pd_req->buffer;
452 pd_retries = 0; 454 pd_retries = 0;
453 if (pd_cmd == READ) 455 if (pd_cmd == READ)
@@ -477,8 +479,8 @@ static int pd_next_buf(void)
477 if (pd_count) 479 if (pd_count)
478 return 0; 480 return 0;
479 spin_lock_irqsave(&pd_lock, saved_flags); 481 spin_lock_irqsave(&pd_lock, saved_flags);
480 end_request(pd_req, 1); 482 __blk_end_request_cur(pd_req, 0);
481 pd_count = pd_req->current_nr_sectors; 483 pd_count = blk_rq_cur_sectors(pd_req);
482 pd_buf = pd_req->buffer; 484 pd_buf = pd_req->buffer;
483 spin_unlock_irqrestore(&pd_lock, saved_flags); 485 spin_unlock_irqrestore(&pd_lock, saved_flags);
484 return 0; 486 return 0;
@@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
702{ 704{
703 if (pd_req) 705 if (pd_req)
704 return; 706 return;
705 pd_req = elv_next_request(q); 707 pd_req = blk_fetch_request(q);
706 if (!pd_req) 708 if (!pd_req)
707 return; 709 return;
708 710
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bef3b997ba3..68a90834e99 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,12 +750,10 @@ static int pf_ready(void)
750 750
751static struct request_queue *pf_queue; 751static struct request_queue *pf_queue;
752 752
753static void pf_end_request(int uptodate) 753static void pf_end_request(int err)
754{ 754{
755 if (pf_req) { 755 if (pf_req && !__blk_end_request_cur(pf_req, err))
756 end_request(pf_req, uptodate);
757 pf_req = NULL; 756 pf_req = NULL;
758 }
759} 757}
760 758
761static void do_pf_request(struct request_queue * q) 759static void do_pf_request(struct request_queue * q)
@@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
763 if (pf_busy) 761 if (pf_busy)
764 return; 762 return;
765repeat: 763repeat:
766 pf_req = elv_next_request(q); 764 if (!pf_req) {
767 if (!pf_req) 765 pf_req = blk_fetch_request(q);
768 return; 766 if (!pf_req)
767 return;
768 }
769 769
770 pf_current = pf_req->rq_disk->private_data; 770 pf_current = pf_req->rq_disk->private_data;
771 pf_block = pf_req->sector; 771 pf_block = blk_rq_pos(pf_req);
772 pf_run = pf_req->nr_sectors; 772 pf_run = blk_rq_sectors(pf_req);
773 pf_count = pf_req->current_nr_sectors; 773 pf_count = blk_rq_cur_sectors(pf_req);
774 774
775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { 775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
776 pf_end_request(0); 776 pf_end_request(-EIO);
777 goto repeat; 777 goto repeat;
778 } 778 }
779 779
@@ -788,7 +788,7 @@ repeat:
788 pi_do_claimed(pf_current->pi, do_pf_write); 788 pi_do_claimed(pf_current->pi, do_pf_write);
789 else { 789 else {
790 pf_busy = 0; 790 pf_busy = 0;
791 pf_end_request(0); 791 pf_end_request(-EIO);
792 goto repeat; 792 goto repeat;
793 } 793 }
794} 794}
@@ -805,23 +805,22 @@ static int pf_next_buf(void)
805 return 1; 805 return 1;
806 if (!pf_count) { 806 if (!pf_count) {
807 spin_lock_irqsave(&pf_spin_lock, saved_flags); 807 spin_lock_irqsave(&pf_spin_lock, saved_flags);
808 pf_end_request(1); 808 pf_end_request(0);
809 pf_req = elv_next_request(pf_queue);
810 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 809 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
811 if (!pf_req) 810 if (!pf_req)
812 return 1; 811 return 1;
813 pf_count = pf_req->current_nr_sectors; 812 pf_count = blk_rq_cur_sectors(pf_req);
814 pf_buf = pf_req->buffer; 813 pf_buf = pf_req->buffer;
815 } 814 }
816 return 0; 815 return 0;
817} 816}
818 817
819static inline void next_request(int success) 818static inline void next_request(int err)
820{ 819{
821 unsigned long saved_flags; 820 unsigned long saved_flags;
822 821
823 spin_lock_irqsave(&pf_spin_lock, saved_flags); 822 spin_lock_irqsave(&pf_spin_lock, saved_flags);
824 pf_end_request(success); 823 pf_end_request(err);
825 pf_busy = 0; 824 pf_busy = 0;
826 do_pf_request(pf_queue); 825 do_pf_request(pf_queue);
827 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 826 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@@ -844,7 +843,7 @@ static void do_pf_read_start(void)
844 pi_do_claimed(pf_current->pi, do_pf_read_start); 843 pi_do_claimed(pf_current->pi, do_pf_read_start);
845 return; 844 return;
846 } 845 }
847 next_request(0); 846 next_request(-EIO);
848 return; 847 return;
849 } 848 }
850 pf_mask = STAT_DRQ; 849 pf_mask = STAT_DRQ;
@@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
863 pi_do_claimed(pf_current->pi, do_pf_read_start); 862 pi_do_claimed(pf_current->pi, do_pf_read_start);
864 return; 863 return;
865 } 864 }
866 next_request(0); 865 next_request(-EIO);
867 return; 866 return;
868 } 867 }
869 pi_read_block(pf_current->pi, pf_buf, 512); 868 pi_read_block(pf_current->pi, pf_buf, 512);
@@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
871 break; 870 break;
872 } 871 }
873 pi_disconnect(pf_current->pi); 872 pi_disconnect(pf_current->pi);
874 next_request(1); 873 next_request(0);
875} 874}
876 875
877static void do_pf_write(void) 876static void do_pf_write(void)
@@ -890,7 +889,7 @@ static void do_pf_write_start(void)
890 pi_do_claimed(pf_current->pi, do_pf_write_start); 889 pi_do_claimed(pf_current->pi, do_pf_write_start);
891 return; 890 return;
892 } 891 }
893 next_request(0); 892 next_request(-EIO);
894 return; 893 return;
895 } 894 }
896 895
@@ -903,7 +902,7 @@ static void do_pf_write_start(void)
903 pi_do_claimed(pf_current->pi, do_pf_write_start); 902 pi_do_claimed(pf_current->pi, do_pf_write_start);
904 return; 903 return;
905 } 904 }
906 next_request(0); 905 next_request(-EIO);
907 return; 906 return;
908 } 907 }
909 pi_write_block(pf_current->pi, pf_buf, 512); 908 pi_write_block(pf_current->pi, pf_buf, 512);
@@ -923,11 +922,11 @@ static void do_pf_write_done(void)
923 pi_do_claimed(pf_current->pi, do_pf_write_start); 922 pi_do_claimed(pf_current->pi, do_pf_write_start);
924 return; 923 return;
925 } 924 }
926 next_request(0); 925 next_request(-EIO);
927 return; 926 return;
928 } 927 }
929 pi_disconnect(pf_current->pi); 928 pi_disconnect(pf_current->pi);
930 next_request(1); 929 next_request(0);
931} 930}
932 931
933static int __init pf_init(void) 932static int __init pf_init(void)
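
pcd, pd and pf keep the request they are working on in a driver-global pointer, so the old end_request(req, uptodate) call becomes __blk_end_request_cur(req, err): it completes only the current chunk and returns true while the request still has sectors left, which is exactly the cue these drivers use before fetching a new request. The following sketch shows that idiom with hypothetical my_* names standing in for the pf_* globals; error codes follow the 0/-errno convention the hunks switch to, and the queue lock is assumed held as it is for any request function.

#include <linux/blkdev.h>
#include <linux/genhd.h>

static struct request *my_req;	/* request currently being served */

/* hypothetical hardware kick-off, not part of the block API */
static void my_start_transfer(struct request *req) { }

/* Complete the current chunk; forget the request once it is all done.
 * Must be called with the queue lock held. */
static void my_end_request(int err)
{
	if (my_req && !__blk_end_request_cur(my_req, err))
		my_req = NULL;
}

static void my_do_request(struct request_queue *q)
{
repeat:
	if (!my_req) {
		my_req = blk_fetch_request(q);
		if (!my_req)
			return;
	}

	/* per-chunk position/size via accessors, not ->current_nr_sectors */
	if (blk_rq_pos(my_req) + blk_rq_cur_sectors(my_req) >
	    get_capacity(my_req->rq_disk)) {
		my_end_request(-EIO);
		goto repeat;
	}

	/*
	 * Start transferring blk_rq_cur_sectors(my_req) sectors from
	 * my_req->buffer; on completion the driver calls
	 * my_end_request(0) and re-enters my_do_request() for the next
	 * chunk or the next request.
	 */
	my_start_transfer(my_req);
}
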
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index bccc42bb921..338cee4cc0b 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
134 rq_for_each_segment(bv, req, iter) 134 rq_for_each_segment(bv, req, iter)
135 n++; 135 n++;
136 dev_dbg(&dev->sbd.core, 136 dev_dbg(&dev->sbd.core,
137 "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n", 137 "%s:%u: %s req has %u bvecs for %u sectors\n",
138 __func__, __LINE__, op, n, req->nr_sectors, 138 __func__, __LINE__, op, n, blk_rq_sectors(req));
139 req->hard_nr_sectors);
140#endif 139#endif
141 140
142 start_sector = req->sector * priv->blocking_factor; 141 start_sector = blk_rq_pos(req) * priv->blocking_factor;
143 sectors = req->nr_sectors * priv->blocking_factor; 142 sectors = blk_rq_sectors(req) * priv->blocking_factor;
144 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n", 143 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
145 __func__, __LINE__, op, sectors, start_sector); 144 __func__, __LINE__, op, sectors, start_sector);
146 145
@@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
158 if (res) { 157 if (res) {
159 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, 158 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
160 __LINE__, op, res); 159 __LINE__, op, res);
161 end_request(req, 0); 160 __blk_end_request_all(req, -EIO);
162 return 0; 161 return 0;
163 } 162 }
164 163
@@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
180 if (res) { 179 if (res) {
181 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", 180 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
182 __func__, __LINE__, res); 181 __func__, __LINE__, res);
183 end_request(req, 0); 182 __blk_end_request_all(req, -EIO);
184 return 0; 183 return 0;
185 } 184 }
186 185
@@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
195 194
196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 195 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
197 196
198 while ((req = elv_next_request(q))) { 197 while ((req = blk_fetch_request(q))) {
199 if (blk_fs_request(req)) { 198 if (blk_fs_request(req)) {
200 if (ps3disk_submit_request_sg(dev, req)) 199 if (ps3disk_submit_request_sg(dev, req))
201 break; 200 break;
@@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
205 break; 204 break;
206 } else { 205 } else {
207 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); 206 blk_dump_rq_flags(req, DEVICE_NAME " bad request");
208 end_request(req, 0); 207 __blk_end_request_all(req, -EIO);
209 continue; 208 continue;
210 } 209 }
211 } 210 }
@@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
231 struct request *req; 230 struct request *req;
232 int res, read, error; 231 int res, read, error;
233 u64 tag, status; 232 u64 tag, status;
234 unsigned long num_sectors;
235 const char *op; 233 const char *op;
236 234
237 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); 235 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
261 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 259 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
262 req->cmd[0] == REQ_LB_OP_FLUSH) { 260 req->cmd[0] == REQ_LB_OP_FLUSH) {
263 read = 0; 261 read = 0;
264 num_sectors = req->hard_cur_sectors;
265 op = "flush"; 262 op = "flush";
266 } else { 263 } else {
267 read = !rq_data_dir(req); 264 read = !rq_data_dir(req);
268 num_sectors = req->nr_sectors;
269 op = read ? "read" : "write"; 265 op = read ? "read" : "write";
270 } 266 }
271 if (status) { 267 if (status) {
@@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
281 } 277 }
282 278
283 spin_lock(&priv->lock); 279 spin_lock(&priv->lock);
284 __blk_end_request(req, error, num_sectors << 9); 280 __blk_end_request_all(req, error);
285 priv->req = NULL; 281 priv->req = NULL;
286 ps3disk_do_request(dev, priv->queue); 282 ps3disk_do_request(dev, priv->queue);
287 spin_unlock(&priv->lock); 283 spin_unlock(&priv->lock);
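
ps3disk keeps a single request in flight, so its interrupt handler no longer needs to remember num_sectors: __blk_end_request_all() finishes whatever remains of the request, flush or read/write alike. A sketch of that completion path; struct my_priv and the surrounding names are hypothetical, and the lock is assumed to double as the queue lock (as in ps3disk), which is what makes the __blk_* variant legal here.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

struct my_priv {
	spinlock_t		lock;	/* also used as the queue lock */
	struct request_queue	*queue;
	struct request		*req;	/* the single in-flight request */
};

/* Interrupt-side completion; 'error' was derived from device status. */
static void my_complete_one(struct my_priv *priv, int error)
{
	spin_lock(&priv->lock);
	/* replaces __blk_end_request(req, error, num_sectors << 9) */
	__blk_end_request_all(priv->req, error);
	priv->req = NULL;
	/*
	 * The driver then re-runs its request function (see
	 * ps3disk_do_request() above) to start the next request.
	 */
	spin_unlock(&priv->lock);
}
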
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5861e33efe6..cbfd9c0aef0 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD); 212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
213} 213}
214 214
215static void vdc_end_request(struct request *req, int error, int num_sectors)
216{
217 __blk_end_request(req, error, num_sectors << 9);
218}
219
220static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, 215static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
221 unsigned int index) 216 unsigned int index)
222{ 217{
@@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
239 234
240 rqe->req = NULL; 235 rqe->req = NULL;
241 236
242 vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9); 237 __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
243 238
244 if (blk_queue_stopped(port->disk->queue)) 239 if (blk_queue_stopped(port->disk->queue))
245 blk_start_queue(port->disk->queue); 240 blk_start_queue(port->disk->queue);
@@ -421,7 +416,7 @@ static int __send_request(struct request *req)
421 desc->slice = 0; 416 desc->slice = 0;
422 } 417 }
423 desc->status = ~0; 418 desc->status = ~0;
424 desc->offset = (req->sector << 9) / port->vdisk_block_size; 419 desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
425 desc->size = len; 420 desc->size = len;
426 desc->ncookies = err; 421 desc->ncookies = err;
427 422
@@ -446,14 +441,13 @@ out:
446static void do_vdc_request(struct request_queue *q) 441static void do_vdc_request(struct request_queue *q)
447{ 442{
448 while (1) { 443 while (1) {
449 struct request *req = elv_next_request(q); 444 struct request *req = blk_fetch_request(q);
450 445
451 if (!req) 446 if (!req)
452 break; 447 break;
453 448
454 blkdev_dequeue_request(req);
455 if (__send_request(req) < 0) 449 if (__send_request(req) < 0)
456 vdc_end_request(req, -EIO, req->hard_nr_sectors); 450 __blk_end_request_all(req, -EIO);
457 } 451 }
458} 452}
459 453
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index d22cc385693..cf7877fb8a7 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
514 ret = swim_read_sector(fs, side, track, sector, 514 ret = swim_read_sector(fs, side, track, sector,
515 buffer); 515 buffer);
516 if (try-- == 0) 516 if (try-- == 0)
517 return -1; 517 return -EIO;
518 } while (ret != 512); 518 } while (ret != 512);
519 519
520 buffer += ret; 520 buffer += ret;
@@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
528 struct request *req; 528 struct request *req;
529 struct floppy_state *fs; 529 struct floppy_state *fs;
530 530
531 while ((req = elv_next_request(q))) { 531 req = blk_fetch_request(q);
532 while (req) {
533 int err = -EIO;
532 534
533 fs = req->rq_disk->private_data; 535 fs = req->rq_disk->private_data;
534 if (req->sector < 0 || req->sector >= fs->total_secs) { 536 if (blk_rq_pos(req) >= fs->total_secs)
535 end_request(req, 0); 537 goto done;
536 continue; 538 if (!fs->disk_in)
537 } 539 goto done;
538 if (req->current_nr_sectors == 0) { 540 if (rq_data_dir(req) == WRITE && fs->write_protected)
539 end_request(req, 1); 541 goto done;
540 continue; 542
541 }
542 if (!fs->disk_in) {
543 end_request(req, 0);
544 continue;
545 }
546 if (rq_data_dir(req) == WRITE) {
547 if (fs->write_protected) {
548 end_request(req, 0);
549 continue;
550 }
551 }
552 switch (rq_data_dir(req)) { 543 switch (rq_data_dir(req)) {
553 case WRITE: 544 case WRITE:
554 /* NOT IMPLEMENTED */ 545 /* NOT IMPLEMENTED */
555 end_request(req, 0);
556 break; 546 break;
557 case READ: 547 case READ:
558 if (floppy_read_sectors(fs, req->sector, 548 err = floppy_read_sectors(fs, blk_rq_pos(req),
559 req->current_nr_sectors, 549 blk_rq_cur_sectors(req),
560 req->buffer)) { 550 req->buffer);
561 end_request(req, 0);
562 continue;
563 }
564 req->nr_sectors -= req->current_nr_sectors;
565 req->sector += req->current_nr_sectors;
566 req->buffer += req->current_nr_sectors * 512;
567 end_request(req, 1);
568 break; 551 break;
569 } 552 }
553 done:
554 if (!__blk_end_request_cur(req, err))
555 req = blk_fetch_request(q);
570 } 556 }
571} 557}
572 558
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 612965307ba..80df93e3cdd 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
251static int floppy_check_change(struct gendisk *disk); 251static int floppy_check_change(struct gendisk *disk);
252static int floppy_revalidate(struct gendisk *disk); 252static int floppy_revalidate(struct gendisk *disk);
253 253
254static bool swim3_end_request(int err, unsigned int nr_bytes)
255{
256 if (__blk_end_request(fd_req, err, nr_bytes))
257 return true;
258
259 fd_req = NULL;
260 return false;
261}
262
263static bool swim3_end_request_cur(int err)
264{
265 return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
266}
267
254static void swim3_select(struct floppy_state *fs, int sel) 268static void swim3_select(struct floppy_state *fs, int sel)
255{ 269{
256 struct swim3 __iomem *sw = fs->swim3; 270 struct swim3 __iomem *sw = fs->swim3;
@@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
310 wake_up(&fs->wait); 324 wake_up(&fs->wait);
311 return; 325 return;
312 } 326 }
313 while (fs->state == idle && (req = elv_next_request(swim3_queue))) { 327 while (fs->state == idle) {
328 if (!fd_req) {
329 fd_req = blk_fetch_request(swim3_queue);
330 if (!fd_req)
331 break;
332 }
333 req = fd_req;
314#if 0 334#if 0
315 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n", 335 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
316 req->rq_disk->disk_name, req->cmd, 336 req->rq_disk->disk_name, req->cmd,
317 (long)req->sector, req->nr_sectors, req->buffer); 337 (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
318 printk(" errors=%d current_nr_sectors=%ld\n", 338 printk(" errors=%d current_nr_sectors=%u\n",
319 req->errors, req->current_nr_sectors); 339 req->errors, blk_rq_cur_sectors(req));
320#endif 340#endif
321 341
322 if (req->sector < 0 || req->sector >= fs->total_secs) { 342 if (blk_rq_pos(req) >= fs->total_secs) {
323 end_request(req, 0); 343 swim3_end_request_cur(-EIO);
324 continue;
325 }
326 if (req->current_nr_sectors == 0) {
327 end_request(req, 1);
328 continue; 344 continue;
329 } 345 }
330 if (fs->ejected) { 346 if (fs->ejected) {
331 end_request(req, 0); 347 swim3_end_request_cur(-EIO);
332 continue; 348 continue;
333 } 349 }
334 350
@@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
336 if (fs->write_prot < 0) 352 if (fs->write_prot < 0)
337 fs->write_prot = swim3_readbit(fs, WRITE_PROT); 353 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
338 if (fs->write_prot) { 354 if (fs->write_prot) {
339 end_request(req, 0); 355 swim3_end_request_cur(-EIO);
340 continue; 356 continue;
341 } 357 }
342 } 358 }
343 359
344 /* Do not remove the cast. req->sector is now a sector_t and 360 /* Do not remove the cast. blk_rq_pos(req) is now a
345 * can be 64 bits, but it will never go past 32 bits for this 361 * sector_t and can be 64 bits, but it will never go
346 * driver anyway, so we can safely cast it down and not have 362 * past 32 bits for this driver anyway, so we can
347 * to do a 64/32 division 363 * safely cast it down and not have to do a 64/32
364 * division
348 */ 365 */
349 fs->req_cyl = ((long)req->sector) / fs->secpercyl; 366 fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
350 x = ((long)req->sector) % fs->secpercyl; 367 x = ((long)blk_rq_pos(req)) % fs->secpercyl;
351 fs->head = x / fs->secpertrack; 368 fs->head = x / fs->secpertrack;
352 fs->req_sector = x % fs->secpertrack + 1; 369 fs->req_sector = x % fs->secpertrack + 1;
353 fd_req = req; 370 fd_req = req;
@@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
424 struct dbdma_cmd *cp = fs->dma_cmd; 441 struct dbdma_cmd *cp = fs->dma_cmd;
425 struct dbdma_regs __iomem *dr = fs->dma; 442 struct dbdma_regs __iomem *dr = fs->dma;
426 443
427 if (fd_req->current_nr_sectors <= 0) { 444 if (blk_rq_cur_sectors(fd_req) <= 0) {
428 printk(KERN_ERR "swim3: transfer 0 sectors?\n"); 445 printk(KERN_ERR "swim3: transfer 0 sectors?\n");
429 return; 446 return;
430 } 447 }
@@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
432 n = 1; 449 n = 1;
433 else { 450 else {
434 n = fs->secpertrack - fs->req_sector + 1; 451 n = fs->secpertrack - fs->req_sector + 1;
435 if (n > fd_req->current_nr_sectors) 452 if (n > blk_rq_cur_sectors(fd_req))
436 n = fd_req->current_nr_sectors; 453 n = blk_rq_cur_sectors(fd_req);
437 } 454 }
438 fs->scount = n; 455 fs->scount = n;
439 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0); 456 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
508 case do_transfer: 525 case do_transfer:
509 if (fs->cur_cyl != fs->req_cyl) { 526 if (fs->cur_cyl != fs->req_cyl) {
510 if (fs->retries > 5) { 527 if (fs->retries > 5) {
511 end_request(fd_req, 0); 528 swim3_end_request_cur(-EIO);
512 fs->state = idle; 529 fs->state = idle;
513 return; 530 return;
514 } 531 }
@@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
540 out_8(&sw->intr_enable, 0); 557 out_8(&sw->intr_enable, 0);
541 fs->cur_cyl = -1; 558 fs->cur_cyl = -1;
542 if (fs->retries > 5) { 559 if (fs->retries > 5) {
543 end_request(fd_req, 0); 560 swim3_end_request_cur(-EIO);
544 fs->state = idle; 561 fs->state = idle;
545 start_request(fs); 562 start_request(fs);
546 } else { 563 } else {
@@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
559 out_8(&sw->select, RELAX); 576 out_8(&sw->select, RELAX);
560 out_8(&sw->intr_enable, 0); 577 out_8(&sw->intr_enable, 0);
561 printk(KERN_ERR "swim3: seek timeout\n"); 578 printk(KERN_ERR "swim3: seek timeout\n");
562 end_request(fd_req, 0); 579 swim3_end_request_cur(-EIO);
563 fs->state = idle; 580 fs->state = idle;
564 start_request(fs); 581 start_request(fs);
565} 582}
@@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
583 return; 600 return;
584 } 601 }
585 printk(KERN_ERR "swim3: seek settle timeout\n"); 602 printk(KERN_ERR "swim3: seek settle timeout\n");
586 end_request(fd_req, 0); 603 swim3_end_request_cur(-EIO);
587 fs->state = idle; 604 fs->state = idle;
588 start_request(fs); 605 start_request(fs);
589} 606}
@@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
593 struct floppy_state *fs = (struct floppy_state *) data; 610 struct floppy_state *fs = (struct floppy_state *) data;
594 struct swim3 __iomem *sw = fs->swim3; 611 struct swim3 __iomem *sw = fs->swim3;
595 struct dbdma_regs __iomem *dr = fs->dma; 612 struct dbdma_regs __iomem *dr = fs->dma;
596 struct dbdma_cmd *cp = fs->dma_cmd;
597 unsigned long s;
598 int n; 613 int n;
599 614
600 fs->timeout_pending = 0; 615 fs->timeout_pending = 0;
@@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
605 out_8(&sw->intr_enable, 0); 620 out_8(&sw->intr_enable, 0);
606 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 621 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
607 out_8(&sw->select, RELAX); 622 out_8(&sw->select, RELAX);
608 if (rq_data_dir(fd_req) == WRITE)
609 ++cp;
610 if (ld_le16(&cp->xfer_status) != 0)
611 s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
612 else
613 s = 0;
614 fd_req->sector += s;
615 fd_req->current_nr_sectors -= s;
616 printk(KERN_ERR "swim3: timeout %sing sector %ld\n", 623 printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
617 (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); 624 (rq_data_dir(fd_req)==WRITE? "writ": "read"),
618 end_request(fd_req, 0); 625 (long)blk_rq_pos(fd_req));
626 swim3_end_request_cur(-EIO);
619 fs->state = idle; 627 fs->state = idle;
620 start_request(fs); 628 start_request(fs);
621} 629}
@@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
646 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); 654 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
647 fs->cur_cyl = -1; 655 fs->cur_cyl = -1;
648 if (fs->retries > 5) { 656 if (fs->retries > 5) {
649 end_request(fd_req, 0); 657 swim3_end_request_cur(-EIO);
650 fs->state = idle; 658 fs->state = idle;
651 start_request(fs); 659 start_request(fs);
652 } else { 660 } else {
@@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
719 if (intr & ERROR_INTR) { 727 if (intr & ERROR_INTR) {
720 n = fs->scount - 1 - resid / 512; 728 n = fs->scount - 1 - resid / 512;
721 if (n > 0) { 729 if (n > 0) {
722 fd_req->sector += n; 730 blk_update_request(fd_req, 0, n << 9);
723 fd_req->current_nr_sectors -= n;
724 fd_req->buffer += n * 512;
725 fs->req_sector += n; 731 fs->req_sector += n;
726 } 732 }
727 if (fs->retries < 5) { 733 if (fs->retries < 5) {
@@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
730 } else { 736 } else {
731 printk("swim3: error %sing block %ld (err=%x)\n", 737 printk("swim3: error %sing block %ld (err=%x)\n",
732 rq_data_dir(fd_req) == WRITE? "writ": "read", 738 rq_data_dir(fd_req) == WRITE? "writ": "read",
733 (long)fd_req->sector, err); 739 (long)blk_rq_pos(fd_req), err);
734 end_request(fd_req, 0); 740 swim3_end_request_cur(-EIO);
735 fs->state = idle; 741 fs->state = idle;
736 } 742 }
737 } else { 743 } else {
@@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
740 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); 746 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
741 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", 747 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
742 fs->state, rq_data_dir(fd_req), intr, err); 748 fs->state, rq_data_dir(fd_req), intr, err);
743 end_request(fd_req, 0); 749 swim3_end_request_cur(-EIO);
744 fs->state = idle; 750 fs->state = idle;
745 start_request(fs); 751 start_request(fs);
746 break; 752 break;
747 } 753 }
748 fd_req->sector += fs->scount; 754 if (swim3_end_request(0, fs->scount << 9)) {
749 fd_req->current_nr_sectors -= fs->scount;
750 fd_req->buffer += fs->scount * 512;
751 if (fd_req->current_nr_sectors <= 0) {
752 end_request(fd_req, 1);
753 fs->state = idle;
754 } else {
755 fs->req_sector += fs->scount; 755 fs->req_sector += fs->scount;
756 if (fs->req_sector > fs->secpertrack) { 756 if (fs->req_sector > fs->secpertrack) {
757 fs->req_sector -= fs->secpertrack; 757 fs->req_sector -= fs->secpertrack;
@@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
761 } 761 }
762 } 762 }
763 act(fs); 763 act(fs);
764 } 764 } else
765 fs->state = idle;
765 } 766 }
766 if (fs->state == idle) 767 if (fs->state == idle)
767 start_request(fs); 768 start_request(fs);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ff0448e4bf0..da403b6a7f4 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
749 struct request *req = crq->rq; 749 struct request *req = crq->rq;
750 int rc; 750 int rc;
751 751
752 rc = __blk_end_request(req, error, blk_rq_bytes(req)); 752 __blk_end_request_all(req, error);
753 assert(rc == 0);
754 753
755 rc = carm_put_request(host, crq); 754 rc = carm_put_request(host, crq);
756 assert(rc == 0); 755 assert(rc == 0);
@@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
811 810
812 while (1) { 811 while (1) {
813 DPRINTK("get req\n"); 812 DPRINTK("get req\n");
814 rq = elv_next_request(q); 813 rq = blk_fetch_request(q);
815 if (!rq) 814 if (!rq)
816 break; 815 break;
817 816
818 blkdev_dequeue_request(rq);
819
820 crq = rq->special; 817 crq = rq->special;
821 assert(crq != NULL); 818 assert(crq != NULL);
822 assert(crq->rq == rq); 819 assert(crq->rq == rq);
@@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
847 844
848queue_one_request: 845queue_one_request:
849 VPRINTK("get req\n"); 846 VPRINTK("get req\n");
850 rq = elv_next_request(q); 847 rq = blk_peek_request(q);
851 if (!rq) 848 if (!rq)
852 return; 849 return;
853 850
@@ -858,7 +855,7 @@ queue_one_request:
858 } 855 }
859 crq->rq = rq; 856 crq->rq = rq;
860 857
861 blkdev_dequeue_request(rq); 858 blk_start_request(rq);
862 859
863 if (rq_data_dir(rq) == WRITE) { 860 if (rq_data_dir(rq) == WRITE) {
864 writing = 1; 861 writing = 1;
@@ -904,10 +901,10 @@ queue_one_request:
904 msg->sg_count = n_elem; 901 msg->sg_count = n_elem;
905 msg->sg_type = SGT_32BIT; 902 msg->sg_type = SGT_32BIT;
906 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); 903 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
907 msg->lba = cpu_to_le32(rq->sector & 0xffffffff); 904 msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
908 tmp = (rq->sector >> 16) >> 16; 905 tmp = (blk_rq_pos(rq) >> 16) >> 16;
909 msg->lba_high = cpu_to_le16( (u16) tmp ); 906 msg->lba_high = cpu_to_le16( (u16) tmp );
910 msg->lba_count = cpu_to_le16(rq->nr_sectors); 907 msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
911 908
912 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); 909 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
913 for (i = 0; i < n_elem; i++) { 910 for (i = 0; i < n_elem; i++) {
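
sx8 above (and ub and virtio_blk below) cannot always accept the next request, so they use the split form of the new interface: blk_peek_request() looks at the head of the queue without removing it, and blk_start_request() dequeues the request and starts timeout handling only once the driver has committed to it. A sketch of that shape; mydrv_get_slot()/mydrv_issue() are hypothetical stand-ins for the tag and message handling in carm_rq_fn().

#include <linux/blkdev.h>

/* hypothetical driver hooks standing in for carm_rq_fn()'s tag logic */
static bool mydrv_get_slot(void *hostdata) { return true; }
static void mydrv_issue(void *hostdata, struct request *rq) { }

static void mydrv_rq_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		/* out of slots: leave rq on the queue and retry later */
		if (!mydrv_get_slot(q->queuedata)) {
			blk_stop_queue(q);
			break;
		}

		/* commit: dequeue and start the request
		 * (was blkdev_dequeue_request()) */
		blk_start_request(rq);

		mydrv_issue(q->queuedata, rq);
	}
}
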
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 689cd27ac89..e67bbae9547 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
360static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 360static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
361 struct ub_scsi_cmd *cmd, struct ub_request *urq); 361 struct ub_scsi_cmd *cmd, struct ub_request *urq);
362static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 362static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
363static void ub_end_rq(struct request *rq, unsigned int status, 363static void ub_end_rq(struct request *rq, unsigned int status);
364 unsigned int cmd_len);
365static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 364static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
366 struct ub_request *urq, struct ub_scsi_cmd *cmd); 365 struct ub_request *urq, struct ub_scsi_cmd *cmd);
367static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 366static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
627 struct ub_lun *lun = q->queuedata; 626 struct ub_lun *lun = q->queuedata;
628 struct request *rq; 627 struct request *rq;
629 628
630 while ((rq = elv_next_request(q)) != NULL) { 629 while ((rq = blk_peek_request(q)) != NULL) {
631 if (ub_request_fn_1(lun, rq) != 0) { 630 if (ub_request_fn_1(lun, rq) != 0) {
632 blk_stop_queue(q); 631 blk_stop_queue(q);
633 break; 632 break;
@@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
643 int n_elem; 642 int n_elem;
644 643
645 if (atomic_read(&sc->poison)) { 644 if (atomic_read(&sc->poison)) {
646 blkdev_dequeue_request(rq); 645 blk_start_request(rq);
647 ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq)); 646 ub_end_rq(rq, DID_NO_CONNECT << 16);
648 return 0; 647 return 0;
649 } 648 }
650 649
651 if (lun->changed && !blk_pc_request(rq)) { 650 if (lun->changed && !blk_pc_request(rq)) {
652 blkdev_dequeue_request(rq); 651 blk_start_request(rq);
653 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq)); 652 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
654 return 0; 653 return 0;
655 } 654 }
656 655
@@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
660 return -1; 659 return -1;
661 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 660 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
662 661
663 blkdev_dequeue_request(rq); 662 blk_start_request(rq);
664 663
665 urq = &lun->urq; 664 urq = &lun->urq;
666 memset(urq, 0, sizeof(struct ub_request)); 665 memset(urq, 0, sizeof(struct ub_request));
@@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
702 701
703drop: 702drop:
704 ub_put_cmd(lun, cmd); 703 ub_put_cmd(lun, cmd);
705 ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq)); 704 ub_end_rq(rq, DID_ERROR << 16);
706 return 0; 705 return 0;
707} 706}
708 707
@@ -726,8 +725,8 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
726 * The call to blk_queue_hardsect_size() guarantees that request 725 * The call to blk_queue_hardsect_size() guarantees that request
727 * is aligned, but it is given in terms of 512 byte units, always. 726 * is aligned, but it is given in terms of 512 byte units, always.
728 */ 727 */
729 block = rq->sector >> lun->capacity.bshift; 728 block = blk_rq_pos(rq) >> lun->capacity.bshift;
730 nblks = rq->nr_sectors >> lun->capacity.bshift; 729 nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
731 730
732 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; 731 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
733 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ 732 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
739 cmd->cdb[8] = nblks; 738 cmd->cdb[8] = nblks;
740 cmd->cdb_len = 10; 739 cmd->cdb_len = 10;
741 740
742 cmd->len = rq->nr_sectors * 512; 741 cmd->len = blk_rq_bytes(rq);
743} 742}
744 743
745static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 744static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
747{ 746{
748 struct request *rq = urq->rq; 747 struct request *rq = urq->rq;
749 748
750 if (rq->data_len == 0) { 749 if (blk_rq_bytes(rq) == 0) {
751 cmd->dir = UB_DIR_NONE; 750 cmd->dir = UB_DIR_NONE;
752 } else { 751 } else {
753 if (rq_data_dir(rq) == WRITE) 752 if (rq_data_dir(rq) == WRITE)
@@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
762 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 761 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
763 cmd->cdb_len = rq->cmd_len; 762 cmd->cdb_len = rq->cmd_len;
764 763
765 cmd->len = rq->data_len; 764 cmd->len = blk_rq_bytes(rq);
766 765
767 /* 766 /*
768 * To reapply this to every URB is not as incorrect as it looks. 767 * To reapply this to every URB is not as incorrect as it looks.
@@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
777 struct ub_request *urq = cmd->back; 776 struct ub_request *urq = cmd->back;
778 struct request *rq; 777 struct request *rq;
779 unsigned int scsi_status; 778 unsigned int scsi_status;
780 unsigned int cmd_len;
781 779
782 rq = urq->rq; 780 rq = urq->rq;
783 781
784 if (cmd->error == 0) { 782 if (cmd->error == 0) {
785 if (blk_pc_request(rq)) { 783 if (blk_pc_request(rq)) {
786 if (cmd->act_len >= rq->data_len) 784 if (cmd->act_len >= rq->resid_len)
787 rq->data_len = 0; 785 rq->resid_len = 0;
788 else 786 else
789 rq->data_len -= cmd->act_len; 787 rq->resid_len -= cmd->act_len;
790 scsi_status = 0; 788 scsi_status = 0;
791 } else { 789 } else {
792 if (cmd->act_len != cmd->len) { 790 if (cmd->act_len != cmd->len) {
@@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
818 816
819 urq->rq = NULL; 817 urq->rq = NULL;
820 818
821 cmd_len = cmd->len;
822 ub_put_cmd(lun, cmd); 819 ub_put_cmd(lun, cmd);
823 ub_end_rq(rq, scsi_status, cmd_len); 820 ub_end_rq(rq, scsi_status);
824 blk_start_queue(lun->disk->queue); 821 blk_start_queue(lun->disk->queue);
825} 822}
826 823
827static void ub_end_rq(struct request *rq, unsigned int scsi_status, 824static void ub_end_rq(struct request *rq, unsigned int scsi_status)
828 unsigned int cmd_len)
829{ 825{
830 int error; 826 int error;
831 long rqlen;
832 827
833 if (scsi_status == 0) { 828 if (scsi_status == 0) {
834 error = 0; 829 error = 0;
@@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
836 error = -EIO; 831 error = -EIO;
837 rq->errors = scsi_status; 832 rq->errors = scsi_status;
838 } 833 }
839 rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */ 834 __blk_end_request_all(rq, error);
840 if (__blk_end_request(rq, error, cmd_len)) {
841 printk(KERN_WARNING DRV_NAME
842 ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
843 blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
844 }
845} 835}
846 836
847static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 837static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ecccf65dce2..390d69bb7c4 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
252 struct viodasd_device *d; 252 struct viodasd_device *d;
253 unsigned long flags; 253 unsigned long flags;
254 254
255 start = (u64)req->sector << 9; 255 start = (u64)blk_rq_pos(req) << 9;
256 256
257 if (rq_data_dir(req) == READ) { 257 if (rq_data_dir(req) == READ) {
258 direction = DMA_FROM_DEVICE; 258 direction = DMA_FROM_DEVICE;
@@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
361 * back later. 361 * back later.
362 */ 362 */
363 while (num_req_outstanding < VIOMAXREQ) { 363 while (num_req_outstanding < VIOMAXREQ) {
364 req = elv_next_request(q); 364 req = blk_fetch_request(q);
365 if (req == NULL) 365 if (req == NULL)
366 return; 366 return;
367 /* dequeue the current request from the queue */
368 blkdev_dequeue_request(req);
369 /* check that request contains a valid command */ 367 /* check that request contains a valid command */
370 if (!blk_fs_request(req)) { 368 if (!blk_fs_request(req)) {
371 viodasd_end_request(req, -EIO, req->hard_nr_sectors); 369 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
372 continue; 370 continue;
373 } 371 }
374 /* Try sending the request */ 372 /* Try sending the request */
375 if (send_request(req) != 0) 373 if (send_request(req) != 0)
376 viodasd_end_request(req, -EIO, req->hard_nr_sectors); 374 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
377 } 375 }
378} 376}
379 377
@@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
590 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result); 588 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
591 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n", 589 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
592 event->xRc, bevent->sub_result, err->msg); 590 event->xRc, bevent->sub_result, err->msg);
593 num_sect = req->hard_nr_sectors; 591 num_sect = blk_rq_sectors(req);
594 } 592 }
595 qlock = req->q->queue_lock; 593 qlock = req->q->queue_lock;
596 spin_lock_irqsave(qlock, irq_flags); 594 spin_lock_irqsave(qlock, irq_flags);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5d34764c8a8..511d4ae2d17 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,7 @@ struct virtblk_req
37 struct list_head list; 37 struct list_head list;
38 struct request *req; 38 struct request *req;
39 struct virtio_blk_outhdr out_hdr; 39 struct virtio_blk_outhdr out_hdr;
40 struct virtio_scsi_inhdr in_hdr;
40 u8 status; 41 u8 status;
41}; 42};
42 43
@@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
50 spin_lock_irqsave(&vblk->lock, flags); 51 spin_lock_irqsave(&vblk->lock, flags);
51 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { 52 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
52 int error; 53 int error;
54
53 switch (vbr->status) { 55 switch (vbr->status) {
54 case VIRTIO_BLK_S_OK: 56 case VIRTIO_BLK_S_OK:
55 error = 0; 57 error = 0;
@@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
62 break; 64 break;
63 } 65 }
64 66
65 __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req)); 67 if (blk_pc_request(vbr->req)) {
68 vbr->req->resid_len = vbr->in_hdr.residual;
69 vbr->req->sense_len = vbr->in_hdr.sense_len;
70 vbr->req->errors = vbr->in_hdr.errors;
71 }
72
73 __blk_end_request_all(vbr->req, error);
66 list_del(&vbr->list); 74 list_del(&vbr->list);
67 mempool_free(vbr, vblk->pool); 75 mempool_free(vbr, vblk->pool);
68 } 76 }
@@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
74static bool do_req(struct request_queue *q, struct virtio_blk *vblk, 82static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
75 struct request *req) 83 struct request *req)
76{ 84{
77 unsigned long num, out, in; 85 unsigned long num, out = 0, in = 0;
78 struct virtblk_req *vbr; 86 struct virtblk_req *vbr;
79 87
80 vbr = mempool_alloc(vblk->pool, GFP_ATOMIC); 88 vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
85 vbr->req = req; 93 vbr->req = req;
86 if (blk_fs_request(vbr->req)) { 94 if (blk_fs_request(vbr->req)) {
87 vbr->out_hdr.type = 0; 95 vbr->out_hdr.type = 0;
88 vbr->out_hdr.sector = vbr->req->sector; 96 vbr->out_hdr.sector = blk_rq_pos(vbr->req);
89 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 97 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
90 } else if (blk_pc_request(vbr->req)) { 98 } else if (blk_pc_request(vbr->req)) {
91 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 99 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
99 if (blk_barrier_rq(vbr->req)) 107 if (blk_barrier_rq(vbr->req))
100 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; 108 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
101 109
102 sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); 110 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
103 num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
104 sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
105 111
106 if (rq_data_dir(vbr->req) == WRITE) { 112 /*
107 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; 113 * If this is a packet command we need a couple of additional headers.
108 out = 1 + num; 114 * Behind the normal outhdr we put a segment with the scsi command
109 in = 1; 115 * block, and before the normal inhdr we put the sense data and the
109 in = 1; 115 * block, and before the normal inhdr we put the sense data and an
110 } else { 116 * inhdr with additional status information.
111 vbr->out_hdr.type |= VIRTIO_BLK_T_IN; 117 */
112 out = 1; 118 if (blk_pc_request(vbr->req))
113 in = 1 + num; 119 sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
120
121 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
122
123 if (blk_pc_request(vbr->req)) {
124 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
125 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
126 sizeof(vbr->in_hdr));
127 }
128
129 sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
130 sizeof(vbr->status));
131
132 if (num) {
133 if (rq_data_dir(vbr->req) == WRITE) {
134 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
135 out += num;
136 } else {
137 vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
138 in += num;
139 }
114 } 140 }
115 141
116 if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) { 142 if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
@@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
124 150
125static void do_virtblk_request(struct request_queue *q) 151static void do_virtblk_request(struct request_queue *q)
126{ 152{
127 struct virtio_blk *vblk = NULL; 153 struct virtio_blk *vblk = q->queuedata;
128 struct request *req; 154 struct request *req;
129 unsigned int issued = 0; 155 unsigned int issued = 0;
130 156
131 while ((req = elv_next_request(q)) != NULL) { 157 while ((req = blk_peek_request(q)) != NULL) {
132 vblk = req->rq_disk->private_data;
133 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); 158 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
134 159
135 /* If this request fails, stop queue and wait for something to 160 /* If this request fails, stop queue and wait for something to
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
138 blk_stop_queue(q); 163 blk_stop_queue(q);
139 break; 164 break;
140 } 165 }
141 blkdev_dequeue_request(req); 166 blk_start_request(req);
142 issued++; 167 issued++;
143 } 168 }
144 169
@@ -149,8 +174,16 @@ static void do_virtblk_request(struct request_queue *q)
149static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, 174static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
150 unsigned cmd, unsigned long data) 175 unsigned cmd, unsigned long data)
151{ 176{
152 return scsi_cmd_ioctl(bdev->bd_disk->queue, 177 struct gendisk *disk = bdev->bd_disk;
153 bdev->bd_disk, mode, cmd, 178 struct virtio_blk *vblk = disk->private_data;
179
180 /*
181 * Only allow the generic SCSI ioctls if the host can support it.
182 */
183 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
184 return -ENOIOCTLCMD;
185
186 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
154 (void __user *)data); 187 (void __user *)data);
155} 188}
156 189
@@ -249,6 +282,7 @@ static int virtblk_probe(struct virtio_device *vdev)
249 goto out_put_disk; 282 goto out_put_disk;
250 } 283 }
251 284
285 vblk->disk->queue->queuedata = vblk;
252 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue); 286 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
253 287
254 if (index < 26) { 288 if (index < 26) {
@@ -356,6 +390,7 @@ static struct virtio_device_id id_table[] = {
356static unsigned int features[] = { 390static unsigned int features[] = {
357 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, 391 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
358 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 392 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
393 VIRTIO_BLK_F_SCSI,
359}; 394};
360 395
361static struct virtio_driver virtio_blk = { 396static struct virtio_driver virtio_blk = {
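The virtio_blk hunks above show the loop shape every converted request_fn in this series moves toward: driver state hangs off q->queuedata instead of being fished out of each request, blk_peek_request() inspects the head of the queue without dequeueing, and blk_start_request() only dequeues once the driver has committed to issuing the request. A minimal sketch of that loop, using placeholder names (struct my_dev, my_issue()) rather than real virtio_blk symbols:

#include <linux/blkdev.h>

struct my_dev {
	struct request_queue *queue;
	/* ... device state ... */
};

static int my_issue(struct my_dev *dev, struct request *req); /* 0 on success */

/* request_fn; called by the block layer with the queue lock held */
static void my_request_fn(struct request_queue *q)
{
	struct my_dev *dev = q->queuedata;	/* set once at probe time */
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (my_issue(dev, req)) {
			/* out of resources: leave req queued and stop */
			blk_stop_queue(q);
			break;
		}
		/* committed: dequeue it, it is now in flight */
		blk_start_request(req);
	}
}

Keeping the request on the queue until blk_start_request() is what lets the resource-shortage path simply stop the queue instead of requeueing anything.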
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 64b496fce98..ce242921992 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
305 if (xdc_busy) 305 if (xdc_busy)
306 return; 306 return;
307 307
308 while ((req = elv_next_request(q)) != NULL) { 308 req = blk_fetch_request(q);
309 unsigned block = req->sector; 309 while (req) {
310 unsigned count = req->nr_sectors; 310 unsigned block = blk_rq_pos(req);
311 int rw = rq_data_dir(req); 311 unsigned count = blk_rq_cur_sectors(req);
312 XD_INFO *disk = req->rq_disk->private_data; 312 XD_INFO *disk = req->rq_disk->private_data;
313 int res = 0; 313 int res = -EIO;
314 int retry; 314 int retry;
315 315
316 if (!blk_fs_request(req)) { 316 if (!blk_fs_request(req))
317 end_request(req, 0); 317 goto done;
318 continue; 318 if (block + count > get_capacity(req->rq_disk))
319 } 319 goto done;
320 if (block + count > get_capacity(req->rq_disk)) {
321 end_request(req, 0);
322 continue;
323 }
324 if (rw != READ && rw != WRITE) {
325 printk("do_xd_request: unknown request\n");
326 end_request(req, 0);
327 continue;
328 }
329 for (retry = 0; (retry < XD_RETRIES) && !res; retry++) 320 for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
330 res = xd_readwrite(rw, disk, req->buffer, block, count); 321 res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
331 end_request(req, res); /* wrap up, 0 = fail, 1 = success */ 322 block, count);
323 done:
324 /* wrap up, 0 = success, -errno = fail */
325 if (!__blk_end_request_cur(req, res))
326 req = blk_fetch_request(q);
332 } 327 }
333} 328}
334 329
@@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
418 printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); 413 printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
419 xd_recalibrate(drive); 414 xd_recalibrate(drive);
420 spin_lock_irq(&xd_lock); 415 spin_lock_irq(&xd_lock);
421 return (0); 416 return -EIO;
422 case 2: 417 case 2:
423 if (sense[0] & 0x30) { 418 if (sense[0] & 0x30) {
424 printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing")); 419 printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
439 else 434 else
440 printk(" - no valid disk address\n"); 435 printk(" - no valid disk address\n");
441 spin_lock_irq(&xd_lock); 436 spin_lock_irq(&xd_lock);
442 return (0); 437 return -EIO;
443 } 438 }
444 if (xd_dma_buffer) 439 if (xd_dma_buffer)
445 for (i=0; i < (temp * 0x200); i++) 440 for (i=0; i < (temp * 0x200); i++)
@@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
448 count -= temp, buffer += temp * 0x200, block += temp; 443 count -= temp, buffer += temp * 0x200, block += temp;
449 } 444 }
450 spin_lock_irq(&xd_lock); 445 spin_lock_irq(&xd_lock);
451 return (1); 446 return 0;
452} 447}
453 448
454/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ 449/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
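The xd conversion above is the template used by the simple PIO drivers in this series: blk_fetch_request() dequeues up front, each chunk is completed with __blk_end_request_cur() using the 0/-errno convention instead of the old 1/0 uptodate flag, and a new request is only fetched once the current one is fully ended. A sketch of that loop, with my_transfer() standing in for the hardware transfer:

#include <linux/blkdev.h>

static int my_transfer(struct request *req);	/* returns 0 or -errno */

/* request_fn; runs with the queue lock held */
static void my_request_fn(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req) {
		int err = -EIO;

		if (blk_fs_request(req))
			err = my_transfer(req);

		/*
		 * Completes blk_rq_cur_bytes(req); returns true while
		 * segments remain, false once the request is finished.
		 */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}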
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a6cbf7b808e..132120ae4bd 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
122static int get_id_from_freelist(struct blkfront_info *info) 122static int get_id_from_freelist(struct blkfront_info *info)
123{ 123{
124 unsigned long free = info->shadow_free; 124 unsigned long free = info->shadow_free;
125 BUG_ON(free > BLK_RING_SIZE); 125 BUG_ON(free >= BLK_RING_SIZE);
126 info->shadow_free = info->shadow[free].req.id; 126 info->shadow_free = info->shadow[free].req.id;
127 info->shadow[free].req.id = 0x0fffffee; /* debug */ 127 info->shadow[free].req.id = 0x0fffffee; /* debug */
128 return free; 128 return free;
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
231 info->shadow[id].request = (unsigned long)req; 231 info->shadow[id].request = (unsigned long)req;
232 232
233 ring_req->id = id; 233 ring_req->id = id;
234 ring_req->sector_number = (blkif_sector_t)req->sector; 234 ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
235 ring_req->handle = info->handle; 235 ring_req->handle = info->handle;
236 236
237 ring_req->operation = rq_data_dir(req) ? 237 ring_req->operation = rq_data_dir(req) ?
@@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
299 299
300 queued = 0; 300 queued = 0;
301 301
302 while ((req = elv_next_request(rq)) != NULL) { 302 while ((req = blk_peek_request(rq)) != NULL) {
303 info = req->rq_disk->private_data; 303 info = req->rq_disk->private_data;
304 if (!blk_fs_request(req)) {
305 end_request(req, 0);
306 continue;
307 }
308 304
309 if (RING_FULL(&info->ring)) 305 if (RING_FULL(&info->ring))
310 goto wait; 306 goto wait;
311 307
312 pr_debug("do_blk_req %p: cmd %p, sec %lx, " 308 blk_start_request(req);
313 "(%u/%li) buffer:%p [%s]\n",
314 req, req->cmd, (unsigned long)req->sector,
315 req->current_nr_sectors,
316 req->nr_sectors, req->buffer,
317 rq_data_dir(req) ? "write" : "read");
318 309
310 if (!blk_fs_request(req)) {
311 __blk_end_request_all(req, -EIO);
312 continue;
313 }
314
315 pr_debug("do_blk_req %p: cmd %p, sec %lx, "
316 "(%u/%u) buffer:%p [%s]\n",
317 req, req->cmd, (unsigned long)blk_rq_pos(req),
318 blk_rq_cur_sectors(req), blk_rq_sectors(req),
319 req->buffer, rq_data_dir(req) ? "write" : "read");
319 320
320 blkdev_dequeue_request(req);
321 if (blkif_queue_request(req)) { 321 if (blkif_queue_request(req)) {
322 blk_requeue_request(rq, req); 322 blk_requeue_request(rq, req);
323wait: 323wait:
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
551 551
552 for (i = info->ring.rsp_cons; i != rp; i++) { 552 for (i = info->ring.rsp_cons; i != rp; i++) {
553 unsigned long id; 553 unsigned long id;
554 int ret;
555 554
556 bret = RING_GET_RESPONSE(&info->ring, i); 555 bret = RING_GET_RESPONSE(&info->ring, i);
557 id = bret->id; 556 id = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
578 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " 577 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
579 "request: %x\n", bret->status); 578 "request: %x\n", bret->status);
580 579
581 ret = __blk_end_request(req, error, blk_rq_bytes(req)); 580 __blk_end_request_all(req, error);
582 BUG_ON(ret);
583 break; 581 break;
584 default: 582 default:
585 BUG(); 583 BUG();
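Alongside the dequeue changes, the xen-blkfront hunks also show the accessor side of the conversion: positions and sizes are read through blk_rq_pos(), blk_rq_sectors() and blk_rq_cur_sectors() instead of poking req->sector and friends, so the values stay correct as the block layer partially completes the request. A small illustrative helper (struct my_ring_req and ring_fill() are made-up names, not blkfront symbols):

#include <linux/kernel.h>
#include <linux/blkdev.h>

struct my_ring_req {
	sector_t	sector_number;
	unsigned int	nr_sectors;
};

static void ring_fill(struct my_ring_req *r, struct request *req)
{
	r->sector_number = blk_rq_pos(req);	/* start sector */
	r->nr_sectors	 = blk_rq_sectors(req);	/* whole request */

	pr_debug("req %p: sec %llu (%u/%u) [%s]\n", req,
		 (unsigned long long)blk_rq_pos(req),
		 blk_rq_cur_sectors(req),	/* current segment only */
		 blk_rq_sectors(req),
		 rq_data_dir(req) ? "write" : "read");
}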
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4aecf5dc6a9..3a4397edab7 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
463{ 463{
464 struct request *req; 464 struct request *req;
465 465
466 while ((req = elv_next_request(q)) != NULL) { 466 while ((req = blk_peek_request(q)) != NULL) {
467 if (blk_fs_request(req)) 467 if (blk_fs_request(req))
468 break; 468 break;
469 end_request(req, 0); 469 blk_start_request(req);
470 __blk_end_request_all(req, -EIO);
470 } 471 }
471 return req; 472 return req;
472} 473}
@@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
492 set_capacity(ace->gd, 0); 493 set_capacity(ace->gd, 0);
493 dev_info(ace->dev, "No CF in slot\n"); 494 dev_info(ace->dev, "No CF in slot\n");
494 495
495 /* Drop all pending requests */ 496 /* Drop all in-flight and pending requests */
496 while ((req = elv_next_request(ace->queue)) != NULL) 497 if (ace->req) {
497 end_request(req, 0); 498 __blk_end_request_all(ace->req, -EIO);
499 ace->req = NULL;
500 }
501 while ((req = blk_fetch_request(ace->queue)) != NULL)
502 __blk_end_request_all(req, -EIO);
498 503
499 /* Drop back to IDLE state and notify waiters */ 504 /* Drop back to IDLE state and notify waiters */
500 ace->fsm_state = ACE_FSM_STATE_IDLE; 505 ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
642 ace->fsm_state = ACE_FSM_STATE_IDLE; 647 ace->fsm_state = ACE_FSM_STATE_IDLE;
643 break; 648 break;
644 } 649 }
650 blk_start_request(req);
645 651
646 /* Okay, it's a data request, set it up for transfer */ 652 /* Okay, it's a data request, set it up for transfer */
647 dev_dbg(ace->dev, 653 dev_dbg(ace->dev,
648 "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n", 654 "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
649 (unsigned long long) req->sector, req->hard_nr_sectors, 655 (unsigned long long)blk_rq_pos(req),
650 req->current_nr_sectors, rq_data_dir(req)); 656 blk_rq_sectors(req), blk_rq_cur_sectors(req),
657 rq_data_dir(req));
651 658
652 ace->req = req; 659 ace->req = req;
653 ace->data_ptr = req->buffer; 660 ace->data_ptr = req->buffer;
654 ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR; 661 ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
655 ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF); 662 ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
656 663
657 count = req->hard_nr_sectors; 664 count = blk_rq_sectors(req);
658 if (rq_data_dir(req)) { 665 if (rq_data_dir(req)) {
659 /* Kick off write request */ 666 /* Kick off write request */
660 dev_dbg(ace->dev, "write data\n"); 667 dev_dbg(ace->dev, "write data\n");
@@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
688 dev_dbg(ace->dev, 695 dev_dbg(ace->dev,
689 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", 696 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
690 ace->fsm_task, ace->fsm_iter_num, 697 ace->fsm_task, ace->fsm_iter_num,
691 ace->req->current_nr_sectors * 16, 698 blk_rq_cur_sectors(ace->req) * 16,
692 ace->data_count, ace->in_irq); 699 ace->data_count, ace->in_irq);
693 ace_fsm_yield(ace); /* need to poll CFBSY bit */ 700 ace_fsm_yield(ace); /* need to poll CFBSY bit */
694 break; 701 break;
@@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
697 dev_dbg(ace->dev, 704 dev_dbg(ace->dev,
698 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", 705 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
699 ace->fsm_task, ace->fsm_iter_num, 706 ace->fsm_task, ace->fsm_iter_num,
700 ace->req->current_nr_sectors * 16, 707 blk_rq_cur_sectors(ace->req) * 16,
701 ace->data_count, ace->in_irq); 708 ace->data_count, ace->in_irq);
702 ace_fsm_yieldirq(ace); 709 ace_fsm_yieldirq(ace);
703 break; 710 break;
@@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
717 } 724 }
718 725
719 /* bio finished; is there another one? */ 726 /* bio finished; is there another one? */
720 if (__blk_end_request(ace->req, 0, 727 if (__blk_end_request_cur(ace->req, 0)) {
721 blk_rq_cur_bytes(ace->req))) { 728 /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
722 /* dev_dbg(ace->dev, "next block; h=%li c=%i\n", 729 * blk_rq_sectors(ace->req),
723 * ace->req->hard_nr_sectors, 730 * blk_rq_cur_sectors(ace->req));
724 * ace->req->current_nr_sectors);
725 */ 731 */
726 ace->data_ptr = ace->req->buffer; 732 ace->data_ptr = ace->req->buffer;
727 ace->data_count = ace->req->current_nr_sectors * 16; 733 ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
728 ace_fsm_yieldirq(ace); 734 ace_fsm_yieldirq(ace);
729 break; 735 break;
730 } 736 }
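The xsysace "No CF in slot" hunk illustrates a consequence of the new model: once a request has been started it is no longer on the queue, so draining everything means failing the driver's own in-flight request first and then fetching whatever is still queued. A sketch, with struct my_dev and dev->req as placeholders for the driver's current-request bookkeeping:

#include <linux/blkdev.h>

struct my_dev {
	struct request_queue *queue;
	struct request *req;		/* currently in flight, or NULL */
};

/* caller holds the queue lock (the lock passed to blk_init_queue()) */
static void my_drop_all_requests(struct my_dev *dev)
{
	struct request *req;

	if (dev->req) {					/* in flight */
		__blk_end_request_all(dev->req, -EIO);
		dev->req = NULL;
	}
	while ((req = blk_fetch_request(dev->queue)) != NULL)
		__blk_end_request_all(req, -EIO);	/* still queued */
}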
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 80754cdd311..4575171e5be 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
70static void do_z2_request(struct request_queue *q) 70static void do_z2_request(struct request_queue *q)
71{ 71{
72 struct request *req; 72 struct request *req;
73 while ((req = elv_next_request(q)) != NULL) { 73
74 unsigned long start = req->sector << 9; 74 req = blk_fetch_request(q);
75 unsigned long len = req->current_nr_sectors << 9; 75 while (req) {
76 unsigned long start = blk_rq_pos(req) << 9;
77 unsigned long len = blk_rq_cur_bytes(req);
78 int err = 0;
76 79
77 if (start + len > z2ram_size) { 80 if (start + len > z2ram_size) {
78 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", 81 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
79 req->sector, req->current_nr_sectors); 82 blk_rq_pos(req), blk_rq_cur_sectors(req));
80 end_request(req, 0); 83 err = -EIO;
81 continue; 84 goto done;
82 } 85 }
83 while (len) { 86 while (len) {
84 unsigned long addr = start & Z2RAM_CHUNKMASK; 87 unsigned long addr = start & Z2RAM_CHUNKMASK;
@@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
93 start += size; 96 start += size;
94 len -= size; 97 len -= size;
95 } 98 }
96 end_request(req, 1); 99 done:
100 if (!__blk_end_request_cur(req, err))
101 req = blk_fetch_request(q);
97 } 102 }
98} 103}
99 104
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 2eecb779437..1e366ad8f68 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
584 list_for_each_safe(elem, next, &gdrom_deferred) { 584 list_for_each_safe(elem, next, &gdrom_deferred) {
585 req = list_entry(elem, struct request, queuelist); 585 req = list_entry(elem, struct request, queuelist);
586 spin_unlock(&gdrom_lock); 586 spin_unlock(&gdrom_lock);
587 block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET; 587 block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
588 block_cnt = req->nr_sectors/GD_TO_BLK; 588 block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG); 589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); 590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG); 591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
632 * before handling ending the request */ 632 * before handling ending the request */
633 spin_lock(&gdrom_lock); 633 spin_lock(&gdrom_lock);
634 list_del_init(&req->queuelist); 634 list_del_init(&req->queuelist);
635 __blk_end_request(req, err, blk_rq_bytes(req)); 635 __blk_end_request_all(req, err);
636 } 636 }
637 spin_unlock(&gdrom_lock); 637 spin_unlock(&gdrom_lock);
638 kfree(read_command); 638 kfree(read_command);
639} 639}
640 640
641static void gdrom_request_handler_dma(struct request *req)
642{
643 /* dequeue, add to list of deferred work
644 * and then schedule workqueue */
645 blkdev_dequeue_request(req);
646 list_add_tail(&req->queuelist, &gdrom_deferred);
647 schedule_work(&work);
648}
649
650static void gdrom_request(struct request_queue *rq) 641static void gdrom_request(struct request_queue *rq)
651{ 642{
652 struct request *req; 643 struct request *req;
653 644
654 while ((req = elv_next_request(rq)) != NULL) { 645 while ((req = blk_fetch_request(rq)) != NULL) {
655 if (!blk_fs_request(req)) { 646 if (!blk_fs_request(req)) {
656 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); 647 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
657 end_request(req, 0); 648 __blk_end_request_all(req, -EIO);
649 continue;
658 } 650 }
659 if (rq_data_dir(req) != READ) { 651 if (rq_data_dir(req) != READ) {
660 printk(KERN_NOTICE "GDROM: Read only device -"); 652 printk(KERN_NOTICE "GDROM: Read only device -");
661 printk(" write request ignored\n"); 653 printk(" write request ignored\n");
662 end_request(req, 0); 654 __blk_end_request_all(req, -EIO);
655 continue;
663 } 656 }
664 if (req->nr_sectors) 657
665 gdrom_request_handler_dma(req); 658 /*
666 else 659 * Add to list of deferred work and then schedule
667 end_request(req, 0); 660 * workqueue.
661 */
662 list_add_tail(&req->queuelist, &gdrom_deferred);
663 schedule_work(&work);
668 } 664 }
669} 665}
670 666
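gdrom keeps its workqueue deferral across the conversion; since blk_fetch_request() already dequeues, the request_fn only validates the request, parks it on a list and kicks the worker, which completes it later with __blk_end_request_all(). A sketch under the assumption that my_lock is the spinlock handed to blk_init_queue() (so the request_fn already runs with it held); my_do_io() and the other my_* names are placeholders:

#include <linux/blkdev.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static LIST_HEAD(my_deferred);
static DEFINE_SPINLOCK(my_lock);		/* also the queue lock */
static void my_worker(struct work_struct *work);
static DECLARE_WORK(my_work, my_worker);

static int my_do_io(struct request *req);	/* returns 0 or -errno */

/* request_fn; runs with my_lock held */
static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (!blk_fs_request(req) || rq_data_dir(req) != READ) {
			__blk_end_request_all(req, -EIO);
			continue;
		}
		list_add_tail(&req->queuelist, &my_deferred);
		schedule_work(&my_work);
	}
}

static void my_worker(struct work_struct *work)
{
	struct request *req;
	int err;

	spin_lock_irq(&my_lock);
	while (!list_empty(&my_deferred)) {
		req = list_entry(my_deferred.next, struct request, queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&my_lock);

		err = my_do_io(req);		/* transfer, no locks held */

		spin_lock_irq(&my_lock);	/* __blk_end_* needs the lock */
		__blk_end_request_all(req, err);
	}
	spin_unlock_irq(&my_lock);
}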
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 9b1624e0dde..f177c2d4017 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
282 viopath_targetinst(viopath_hostLp), 282 viopath_targetinst(viopath_hostLp),
283 (u64)req, VIOVERSION << 16, 283 (u64)req, VIOVERSION << 16,
284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr, 284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
285 (u64)req->sector * 512, len, 0); 285 (u64)blk_rq_pos(req) * 512, len, 0);
286 if (hvrc != HvLpEvent_Rc_Good) { 286 if (hvrc != HvLpEvent_Rc_Good) {
287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc); 287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
288 return -1; 288 return -1;
@@ -291,36 +291,19 @@ static int send_request(struct request *req)
291 return 0; 291 return 0;
292} 292}
293 293
294static void viocd_end_request(struct request *req, int error)
295{
296 int nsectors = req->hard_nr_sectors;
297
298 /*
299 * Make sure it's fully ended, and ensure that we process
300 * at least one sector.
301 */
302 if (blk_pc_request(req))
303 nsectors = (req->data_len + 511) >> 9;
304 if (!nsectors)
305 nsectors = 1;
306
307 if (__blk_end_request(req, error, nsectors << 9))
308 BUG();
309}
310
311static int rwreq; 294static int rwreq;
312 295
313static void do_viocd_request(struct request_queue *q) 296static void do_viocd_request(struct request_queue *q)
314{ 297{
315 struct request *req; 298 struct request *req;
316 299
317 while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { 300 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
318 if (!blk_fs_request(req)) 301 if (!blk_fs_request(req))
319 viocd_end_request(req, -EIO); 302 __blk_end_request_all(req, -EIO);
320 else if (send_request(req) < 0) { 303 else if (send_request(req) < 0) {
321 printk(VIOCD_KERN_WARNING 304 printk(VIOCD_KERN_WARNING
322 "unable to send message to OS/400!"); 305 "unable to send message to OS/400!");
323 viocd_end_request(req, -EIO); 306 __blk_end_request_all(req, -EIO);
324 } else 307 } else
325 rwreq++; 308 rwreq++;
326 } 309 }
@@ -531,9 +514,9 @@ return_complete:
531 "with rc %d:0x%04X: %s\n", 514 "with rc %d:0x%04X: %s\n",
532 req, event->xRc, 515 req, event->xRc,
533 bevent->sub_result, err->msg); 516 bevent->sub_result, err->msg);
534 viocd_end_request(req, -EIO); 517 __blk_end_request_all(req, -EIO);
535 } else 518 } else
536 viocd_end_request(req, 0); 519 __blk_end_request_all(req, 0);
537 520
538 /* restart handling of incoming requests */ 521 /* restart handling of incoming requests */
539 spin_unlock_irqrestore(&viocd_reqlock, flags); 522 spin_unlock_irqrestore(&viocd_reqlock, flags);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 7201b176d75..8a894fa37b5 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -80,34 +80,6 @@ void ide_init_pc(struct ide_atapi_pc *pc)
80EXPORT_SYMBOL_GPL(ide_init_pc); 80EXPORT_SYMBOL_GPL(ide_init_pc);
81 81
82/* 82/*
83 * Generate a new packet command request in front of the request queue, before
84 * the current request, so that it will be processed immediately, on the next
85 * pass through the driver.
86 */
87static void ide_queue_pc_head(ide_drive_t *drive, struct gendisk *disk,
88 struct ide_atapi_pc *pc, struct request *rq)
89{
90 blk_rq_init(NULL, rq);
91 rq->cmd_type = REQ_TYPE_SPECIAL;
92 rq->cmd_flags |= REQ_PREEMPT;
93 rq->buffer = (char *)pc;
94 rq->rq_disk = disk;
95
96 if (pc->req_xfer) {
97 rq->data = pc->buf;
98 rq->data_len = pc->req_xfer;
99 }
100
101 memcpy(rq->cmd, pc->c, 12);
102 if (drive->media == ide_tape)
103 rq->cmd[13] = REQ_IDETAPE_PC1;
104
105 drive->hwif->rq = NULL;
106
107 elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
108}
109
110/*
111 * Add a special packet command request to the tail of the request queue, 83 * Add a special packet command request to the tail of the request queue,
112 * and wait for it to be serviced. 84 * and wait for it to be serviced.
113 */ 85 */
@@ -119,19 +91,21 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
119 91
120 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 92 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
121 rq->cmd_type = REQ_TYPE_SPECIAL; 93 rq->cmd_type = REQ_TYPE_SPECIAL;
122 rq->buffer = (char *)pc; 94 rq->special = (char *)pc;
123 95
124 if (pc->req_xfer) { 96 if (pc->req_xfer) {
125 rq->data = pc->buf; 97 error = blk_rq_map_kern(drive->queue, rq, pc->buf, pc->req_xfer,
126 rq->data_len = pc->req_xfer; 98 GFP_NOIO);
99 if (error)
100 goto put_req;
127 } 101 }
128 102
129 memcpy(rq->cmd, pc->c, 12); 103 memcpy(rq->cmd, pc->c, 12);
130 if (drive->media == ide_tape) 104 if (drive->media == ide_tape)
131 rq->cmd[13] = REQ_IDETAPE_PC1; 105 rq->cmd[13] = REQ_IDETAPE_PC1;
132 error = blk_execute_rq(drive->queue, disk, rq, 0); 106 error = blk_execute_rq(drive->queue, disk, rq, 0);
107put_req:
133 blk_put_request(rq); 108 blk_put_request(rq);
134
135 return error; 109 return error;
136} 110}
137EXPORT_SYMBOL_GPL(ide_queue_pc_tail); 111EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
@@ -191,20 +165,113 @@ void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
191} 165}
192EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd); 166EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
193 167
168void ide_prep_sense(ide_drive_t *drive, struct request *rq)
169{
170 struct request_sense *sense = &drive->sense_data;
171 struct request *sense_rq = &drive->sense_rq;
172 unsigned int cmd_len, sense_len;
173 int err;
174
175 debug_log("%s: enter\n", __func__);
176
177 switch (drive->media) {
178 case ide_floppy:
179 cmd_len = 255;
180 sense_len = 18;
181 break;
182 case ide_tape:
183 cmd_len = 20;
184 sense_len = 20;
185 break;
186 default:
187 cmd_len = 18;
188 sense_len = 18;
189 }
190
191 BUG_ON(sense_len > sizeof(*sense));
192
193 if (blk_sense_request(rq) || drive->sense_rq_armed)
194 return;
195
196 memset(sense, 0, sizeof(*sense));
197
198 blk_rq_init(rq->q, sense_rq);
199
200 err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
201 GFP_NOIO);
202 if (unlikely(err)) {
203 if (printk_ratelimit())
204 printk(KERN_WARNING "%s: failed to map sense buffer\n",
205 drive->name);
206 return;
207 }
208
209 sense_rq->rq_disk = rq->rq_disk;
210 sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
211 sense_rq->cmd[4] = cmd_len;
212 sense_rq->cmd_type = REQ_TYPE_SENSE;
213 sense_rq->cmd_flags |= REQ_PREEMPT;
214
215 if (drive->media == ide_tape)
216 sense_rq->cmd[13] = REQ_IDETAPE_PC1;
217
218 drive->sense_rq_armed = true;
219}
220EXPORT_SYMBOL_GPL(ide_prep_sense);
221
222int ide_queue_sense_rq(ide_drive_t *drive, void *special)
223{
224 /* deferred failure from ide_prep_sense() */
225 if (!drive->sense_rq_armed) {
226 printk(KERN_WARNING "%s: failed queue sense request\n",
227 drive->name);
228 return -ENOMEM;
229 }
230
231 drive->sense_rq.special = special;
232 drive->sense_rq_armed = false;
233
234 drive->hwif->rq = NULL;
235
236 elv_add_request(drive->queue, &drive->sense_rq,
237 ELEVATOR_INSERT_FRONT, 0);
238 return 0;
239}
240EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
241
194/* 242/*
195 * Called when an error was detected during the last packet command. 243 * Called when an error was detected during the last packet command.
196 * We queue a request sense packet command in the head of the request list. 244 * We queue a request sense packet command at the head of the request
245 * queue.
197 */ 246 */
198void ide_retry_pc(ide_drive_t *drive, struct gendisk *disk) 247void ide_retry_pc(ide_drive_t *drive)
199{ 248{
200 struct request *rq = &drive->request_sense_rq; 249 struct request *failed_rq = drive->hwif->rq;
250 struct request *sense_rq = &drive->sense_rq;
201 struct ide_atapi_pc *pc = &drive->request_sense_pc; 251 struct ide_atapi_pc *pc = &drive->request_sense_pc;
202 252
203 (void)ide_read_error(drive); 253 (void)ide_read_error(drive);
204 ide_create_request_sense_cmd(drive, pc); 254
255 /* init pc from sense_rq */
256 ide_init_pc(pc);
257 memcpy(pc->c, sense_rq->cmd, 12);
258 pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */
259 pc->req_xfer = blk_rq_bytes(sense_rq);
260
205 if (drive->media == ide_tape) 261 if (drive->media == ide_tape)
206 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 262 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
207 ide_queue_pc_head(drive, disk, pc, rq); 263
264 /*
265 * Push back the failed request and put request sense on top
266 * of it. The failed command will be retried after sense data
267 * is acquired.
268 */
269 blk_requeue_request(failed_rq->q, failed_rq);
270 drive->hwif->rq = NULL;
271 if (ide_queue_sense_rq(drive, pc)) {
272 blk_start_request(failed_rq);
273 ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
274 }
208} 275}
209EXPORT_SYMBOL_GPL(ide_retry_pc); 276EXPORT_SYMBOL_GPL(ide_retry_pc);
210 277
@@ -246,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
246 return 32768; 313 return 32768;
247 else if (blk_sense_request(rq) || blk_pc_request(rq) || 314 else if (blk_sense_request(rq) || blk_pc_request(rq) ||
248 rq->cmd_type == REQ_TYPE_ATA_PC) 315 rq->cmd_type == REQ_TYPE_ATA_PC)
249 return rq->data_len; 316 return blk_rq_bytes(rq);
250 else 317 else
251 return 0; 318 return 0;
252} 319}
@@ -276,7 +343,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
276 struct ide_cmd *cmd = &hwif->cmd; 343 struct ide_cmd *cmd = &hwif->cmd;
277 struct request *rq = hwif->rq; 344 struct request *rq = hwif->rq;
278 const struct ide_tp_ops *tp_ops = hwif->tp_ops; 345 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
279 xfer_func_t *xferfunc;
280 unsigned int timeout, done; 346 unsigned int timeout, done;
281 u16 bcount; 347 u16 bcount;
282 u8 stat, ireason, dsc = 0; 348 u8 stat, ireason, dsc = 0;
@@ -303,18 +369,14 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
303 drive->name, rq_data_dir(pc->rq) 369 drive->name, rq_data_dir(pc->rq)
304 ? "write" : "read"); 370 ? "write" : "read");
305 pc->flags |= PC_FLAG_DMA_ERROR; 371 pc->flags |= PC_FLAG_DMA_ERROR;
306 } else { 372 } else
307 pc->xferred = pc->req_xfer; 373 pc->xferred = pc->req_xfer;
308 if (drive->pc_update_buffers)
309 drive->pc_update_buffers(drive, pc);
310 }
311 debug_log("%s: DMA finished\n", drive->name); 374 debug_log("%s: DMA finished\n", drive->name);
312 } 375 }
313 376
314 /* No more interrupts */ 377 /* No more interrupts */
315 if ((stat & ATA_DRQ) == 0) { 378 if ((stat & ATA_DRQ) == 0) {
316 int uptodate, error; 379 int uptodate, error;
317 unsigned int done;
318 380
319 debug_log("Packet command completed, %d bytes transferred\n", 381 debug_log("Packet command completed, %d bytes transferred\n",
320 pc->xferred); 382 pc->xferred);
@@ -343,7 +405,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
343 debug_log("[cmd %x]: check condition\n", rq->cmd[0]); 405 debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
344 406
345 /* Retry operation */ 407 /* Retry operation */
346 ide_retry_pc(drive, rq->rq_disk); 408 ide_retry_pc(drive);
347 409
348 /* queued, but not started */ 410 /* queued, but not started */
349 return ide_stopped; 411 return ide_stopped;
@@ -361,7 +423,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
361 423
362 if (blk_special_request(rq)) { 424 if (blk_special_request(rq)) {
363 rq->errors = 0; 425 rq->errors = 0;
364 done = blk_rq_bytes(rq);
365 error = 0; 426 error = 0;
366 } else { 427 } else {
367 428
@@ -370,15 +431,10 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
370 rq->errors = -EIO; 431 rq->errors = -EIO;
371 } 432 }
372 433
373 if (drive->media == ide_tape)
374 done = ide_rq_bytes(rq); /* FIXME */
375 else
376 done = blk_rq_bytes(rq);
377
378 error = uptodate ? 0 : -EIO; 434 error = uptodate ? 0 : -EIO;
379 } 435 }
380 436
381 ide_complete_rq(drive, error, done); 437 ide_complete_rq(drive, error, blk_rq_bytes(rq));
382 return ide_stopped; 438 return ide_stopped;
383 } 439 }
384 440
@@ -407,21 +463,11 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
407 return ide_do_reset(drive); 463 return ide_do_reset(drive);
408 } 464 }
409 465
410 xferfunc = write ? tp_ops->output_data : tp_ops->input_data; 466 done = min_t(unsigned int, bcount, cmd->nleft);
411 467 ide_pio_bytes(drive, cmd, write, done);
412 if (drive->media == ide_floppy && pc->buf == NULL) {
413 done = min_t(unsigned int, bcount, cmd->nleft);
414 ide_pio_bytes(drive, cmd, write, done);
415 } else if (drive->media == ide_tape && pc->bh) {
416 done = drive->pc_io_buffers(drive, pc, bcount, write);
417 } else {
418 done = min_t(unsigned int, bcount, pc->req_xfer - pc->xferred);
419 xferfunc(drive, NULL, pc->cur_pos, done);
420 }
421 468
422 /* Update the current position */ 469 /* Update transferred byte count */
423 pc->xferred += done; 470 pc->xferred += done;
424 pc->cur_pos += done;
425 471
426 bcount -= done; 472 bcount -= done;
427 473
@@ -599,7 +645,6 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
599 645
600 /* We haven't transferred any data yet */ 646 /* We haven't transferred any data yet */
601 pc->xferred = 0; 647 pc->xferred = 0;
602 pc->cur_pos = pc->buf;
603 648
604 valid_tf = IDE_VALID_DEVICE; 649 valid_tf = IDE_VALID_DEVICE;
605 bcount = ((drive->media == ide_tape) ? 650 bcount = ((drive->media == ide_tape) ?
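The ide-atapi rework above splits sense handling in two: the sense request and its kernel buffer are prepared up front with blk_rq_map_kern(), while queueing it at the head of the queue from the error path does no allocation at all. A simplified sketch of that split (struct my_drive and its fields are stand-ins for the driver-private state, not the ide_drive_t layout):

#include <linux/blkdev.h>
#include <linux/cdrom.h>		/* GPCMD_REQUEST_SENSE */

struct my_drive {
	struct request_queue *queue;
	struct request sense_rq;	/* preallocated, reused */
	u8 sense_buf[18];
	bool sense_armed;
};

/* process context, before issuing a command: allocation is allowed */
static void my_prep_sense(struct my_drive *drive)
{
	struct request *rq = &drive->sense_rq;

	if (drive->sense_armed)
		return;

	blk_rq_init(drive->queue, rq);
	if (blk_rq_map_kern(drive->queue, rq, drive->sense_buf,
			    sizeof(drive->sense_buf), GFP_NOIO))
		return;				/* stays unarmed, fails later */

	rq->cmd[0] = GPCMD_REQUEST_SENSE;
	rq->cmd[4] = sizeof(drive->sense_buf);
	rq->cmd_type = REQ_TYPE_SENSE;
	rq->cmd_flags |= REQ_PREEMPT;
	drive->sense_armed = true;
}

/* error path: no allocation, just push the armed request to the head */
static int my_queue_sense(struct my_drive *drive)
{
	if (!drive->sense_armed)
		return -ENOMEM;			/* deferred map failure */

	drive->sense_armed = false;
	elv_add_request(drive->queue, &drive->sense_rq,
			ELEVATOR_INSERT_FRONT, 0);
	return 0;
}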
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 925eb9e245d..1799328decf 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -206,54 +206,25 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
206 ide_cd_log_error(drive->name, failed_command, sense); 206 ide_cd_log_error(drive->name, failed_command, sense);
207} 207}
208 208
209static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
210 struct request *failed_command)
211{
212 struct cdrom_info *info = drive->driver_data;
213 struct request *rq = &drive->request_sense_rq;
214
215 ide_debug_log(IDE_DBG_SENSE, "enter");
216
217 if (sense == NULL)
218 sense = &info->sense_data;
219
220 /* stuff the sense request in front of our current request */
221 blk_rq_init(NULL, rq);
222 rq->cmd_type = REQ_TYPE_ATA_PC;
223 rq->rq_disk = info->disk;
224
225 rq->data = sense;
226 rq->cmd[0] = GPCMD_REQUEST_SENSE;
227 rq->cmd[4] = 18;
228 rq->data_len = 18;
229
230 rq->cmd_type = REQ_TYPE_SENSE;
231 rq->cmd_flags |= REQ_PREEMPT;
232
233 /* NOTE! Save the failed command in "rq->buffer" */
234 rq->buffer = (void *) failed_command;
235
236 if (failed_command)
237 ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
238 failed_command->cmd[0]);
239
240 drive->hwif->rq = NULL;
241
242 elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
243}
244
245static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) 209static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
246{ 210{
247 /* 211 /*
248 * For REQ_TYPE_SENSE, "rq->buffer" points to the original 212 * For REQ_TYPE_SENSE, "rq->special" points to the original
249 * failed request 213 * failed request. Also, the sense data should be read
214 * directly from rq which might be different from the original
215 * sense buffer if it got copied during mapping.
250 */ 216 */
251 struct request *failed = (struct request *)rq->buffer; 217 struct request *failed = (struct request *)rq->special;
252 struct cdrom_info *info = drive->driver_data; 218 void *sense = bio_data(rq->bio);
253 void *sense = &info->sense_data;
254 219
255 if (failed) { 220 if (failed) {
256 if (failed->sense) { 221 if (failed->sense) {
222 /*
223 * Sense is always read into drive->sense_data.
224 * Copy back if the failed request has its
225 * sense pointer set.
226 */
227 memcpy(failed->sense, sense, 18);
257 sense = failed->sense; 228 sense = failed->sense;
258 failed->sense_len = rq->sense_len; 229 failed->sense_len = rq->sense_len;
259 } 230 }
@@ -428,22 +399,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
428 399
429 /* if we got a CHECK_CONDITION status, queue a request sense command */ 400 /* if we got a CHECK_CONDITION status, queue a request sense command */
430 if (stat & ATA_ERR) 401 if (stat & ATA_ERR)
431 cdrom_queue_request_sense(drive, NULL, NULL); 402 return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
432 return 1; 403 return 1;
433 404
434end_request: 405end_request:
435 if (stat & ATA_ERR) { 406 if (stat & ATA_ERR) {
436 struct request_queue *q = drive->queue;
437 unsigned long flags;
438
439 spin_lock_irqsave(q->queue_lock, flags);
440 blkdev_dequeue_request(rq);
441 spin_unlock_irqrestore(q->queue_lock, flags);
442
443 hwif->rq = NULL; 407 hwif->rq = NULL;
444 408 return ide_queue_sense_rq(drive, rq) ? 2 : 1;
445 cdrom_queue_request_sense(drive, rq->sense, rq);
446 return 1;
447 } else 409 } else
448 return 2; 410 return 2;
449} 411}
@@ -503,14 +465,8 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
503 * and some drives don't send them. Sigh. 465 * and some drives don't send them. Sigh.
504 */ 466 */
505 if (rq->cmd[0] == GPCMD_REQUEST_SENSE && 467 if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
506 cmd->nleft > 0 && cmd->nleft <= 5) { 468 cmd->nleft > 0 && cmd->nleft <= 5)
507 unsigned int ofs = cmd->nbytes - cmd->nleft; 469 cmd->nleft = 0;
508
509 while (cmd->nleft > 0) {
510 *((u8 *)rq->data + ofs++) = 0;
511 cmd->nleft--;
512 }
513 }
514} 470}
515 471
516int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, 472int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
@@ -543,14 +499,18 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
543 rq->cmd_flags |= cmd_flags; 499 rq->cmd_flags |= cmd_flags;
544 rq->timeout = timeout; 500 rq->timeout = timeout;
545 if (buffer) { 501 if (buffer) {
546 rq->data = buffer; 502 error = blk_rq_map_kern(drive->queue, rq, buffer,
547 rq->data_len = *bufflen; 503 *bufflen, GFP_NOIO);
504 if (error) {
505 blk_put_request(rq);
506 return error;
507 }
548 } 508 }
549 509
550 error = blk_execute_rq(drive->queue, info->disk, rq, 0); 510 error = blk_execute_rq(drive->queue, info->disk, rq, 0);
551 511
552 if (buffer) 512 if (buffer)
553 *bufflen = rq->data_len; 513 *bufflen = rq->resid_len;
554 514
555 flags = rq->cmd_flags; 515 flags = rq->cmd_flags;
556 blk_put_request(rq); 516 blk_put_request(rq);
@@ -608,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
608 struct request *rq = hwif->rq; 568 struct request *rq = hwif->rq;
609 ide_expiry_t *expiry = NULL; 569 ide_expiry_t *expiry = NULL;
610 int dma_error = 0, dma, thislen, uptodate = 0; 570 int dma_error = 0, dma, thislen, uptodate = 0;
611 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors; 571 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
612 int sense = blk_sense_request(rq); 572 int sense = blk_sense_request(rq);
613 unsigned int timeout; 573 unsigned int timeout;
614 u16 len; 574 u16 len;
@@ -738,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
738 698
739out_end: 699out_end:
740 if (blk_pc_request(rq) && rc == 0) { 700 if (blk_pc_request(rq) && rc == 0) {
741 unsigned int dlen = rq->data_len; 701 rq->resid_len = 0;
742 702 blk_end_request_all(rq, 0);
743 rq->data_len = 0;
744
745 if (blk_end_request(rq, 0, dlen))
746 BUG();
747
748 hwif->rq = NULL; 703 hwif->rq = NULL;
749 } else { 704 } else {
750 if (sense && uptodate) 705 if (sense && uptodate)
@@ -762,21 +717,13 @@ out_end:
762 ide_cd_error_cmd(drive, cmd); 717 ide_cd_error_cmd(drive, cmd);
763 718
764 /* make sure it's fully ended */ 719 /* make sure it's fully ended */
765 if (blk_pc_request(rq))
766 nsectors = (rq->data_len + 511) >> 9;
767 else
768 nsectors = rq->hard_nr_sectors;
769
770 if (nsectors == 0)
771 nsectors = 1;
772
773 if (blk_fs_request(rq) == 0) { 720 if (blk_fs_request(rq) == 0) {
774 rq->data_len -= (cmd->nbytes - cmd->nleft); 721 rq->resid_len -= cmd->nbytes - cmd->nleft;
775 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 722 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
776 rq->data_len += cmd->last_xfer_len; 723 rq->resid_len += cmd->last_xfer_len;
777 } 724 }
778 725
779 ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9); 726 ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
780 727
781 if (sense && rc == 2) 728 if (sense && rc == 2)
782 ide_error(drive, "request sense failure", stat); 729 ide_error(drive, "request sense failure", stat);
@@ -809,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
809 } 756 }
810 757
811 /* fs requests *must* be hardware frame aligned */ 758 /* fs requests *must* be hardware frame aligned */
812 if ((rq->nr_sectors & (sectors_per_frame - 1)) || 759 if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
813 (rq->sector & (sectors_per_frame - 1))) 760 (blk_rq_pos(rq) & (sectors_per_frame - 1)))
814 return ide_stopped; 761 return ide_stopped;
815 762
816 /* use DMA, if possible */ 763 /* use DMA, if possible */
@@ -838,15 +785,10 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
838 drive->dma = 0; 785 drive->dma = 0;
839 786
840 /* sg request */ 787 /* sg request */
841 if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) { 788 if (rq->bio) {
842 struct request_queue *q = drive->queue; 789 struct request_queue *q = drive->queue;
790 char *buf = bio_data(rq->bio);
843 unsigned int alignment; 791 unsigned int alignment;
844 char *buf;
845
846 if (rq->bio)
847 buf = bio_data(rq->bio);
848 else
849 buf = rq->data;
850 792
851 drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 793 drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
852 794
@@ -858,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
858 */ 800 */
859 alignment = queue_dma_alignment(q) | q->dma_pad_mask; 801 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
860 if ((unsigned long)buf & alignment 802 if ((unsigned long)buf & alignment
861 || rq->data_len & q->dma_pad_mask 803 || blk_rq_bytes(rq) & q->dma_pad_mask
862 || object_is_on_stack(buf)) 804 || object_is_on_stack(buf))
863 drive->dma = 0; 805 drive->dma = 0;
864 } 806 }
@@ -896,6 +838,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
896 goto out_end; 838 goto out_end;
897 } 839 }
898 840
841 /* prepare sense request for this command */
842 ide_prep_sense(drive, rq);
843
899 memset(&cmd, 0, sizeof(cmd)); 844 memset(&cmd, 0, sizeof(cmd));
900 845
901 if (rq_data_dir(rq)) 846 if (rq_data_dir(rq))
@@ -903,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
903 848
904 cmd.rq = rq; 849 cmd.rq = rq;
905 850
906 if (blk_fs_request(rq) || rq->data_len) { 851 if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
907 ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9) 852 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
908 : rq->data_len);
909 ide_map_sg(drive, &cmd); 853 ide_map_sg(drive, &cmd);
910 } 854 }
911 855
912 return ide_issue_pc(drive, &cmd); 856 return ide_issue_pc(drive, &cmd);
913out_end: 857out_end:
914 nsectors = rq->hard_nr_sectors; 858 nsectors = blk_rq_sectors(rq);
915 859
916 if (nsectors == 0) 860 if (nsectors == 0)
917 nsectors = 1; 861 nsectors = 1;
@@ -1395,8 +1339,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1395static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) 1339static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1396{ 1340{
1397 int hard_sect = queue_hardsect_size(q); 1341 int hard_sect = queue_hardsect_size(q);
1398 long block = (long)rq->hard_sector / (hard_sect >> 9); 1342 long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
1399 unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9); 1343 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
1400 1344
1401 memset(rq->cmd, 0, BLK_MAX_CDB); 1345 memset(rq->cmd, 0, BLK_MAX_CDB);
1402 1346
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1d97101099c..93a3cf1b0f3 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -87,10 +87,6 @@ struct cdrom_info {
87 87
88 struct atapi_toc *toc; 88 struct atapi_toc *toc;
89 89
90 /* The result of the last successful request sense command
91 on this device. */
92 struct request_sense sense_data;
93
94 u8 max_speed; /* Max speed of the drive. */ 90 u8 max_speed; /* Max speed of the drive. */
95 u8 current_speed; /* Current speed of the drive. */ 91 u8 current_speed; /* Current speed of the drive. */
96 92
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index a9fbe2c3121..ad18e14043c 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
82 sector_t block) 82 sector_t block)
83{ 83{
84 ide_hwif_t *hwif = drive->hwif; 84 ide_hwif_t *hwif = drive->hwif;
85 u16 nsectors = (u16)rq->nr_sectors; 85 u16 nsectors = (u16)blk_rq_sectors(rq);
86 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); 86 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
87 u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 87 u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
88 struct ide_cmd cmd; 88 struct ide_cmd cmd;
@@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
90 ide_startstop_t rc; 90 ide_startstop_t rc;
91 91
92 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) { 92 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
93 if (block + rq->nr_sectors > 1ULL << 28) 93 if (block + blk_rq_sectors(rq) > 1ULL << 28)
94 dma = 0; 94 dma = 0;
95 else 95 else
96 lba48 = 0; 96 lba48 = 0;
@@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
195 195
196 ledtrig_ide_activity(); 196 ledtrig_ide_activity();
197 197
198 pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n", 198 pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
199 drive->name, rq_data_dir(rq) == READ ? "read" : "writ", 199 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
200 (unsigned long long)block, rq->nr_sectors, 200 (unsigned long long)block, blk_rq_sectors(rq),
201 (unsigned long)rq->buffer); 201 (unsigned long)rq->buffer);
202 202
203 if (hwif->rw_disk) 203 if (hwif->rw_disk)
@@ -411,7 +411,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
411 cmd->protocol = ATA_PROT_NODATA; 411 cmd->protocol = ATA_PROT_NODATA;
412 412
413 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 413 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
414 rq->cmd_flags |= REQ_SOFTBARRIER;
415 rq->special = cmd; 414 rq->special = cmd;
416} 415}
417 416
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index a0b8cab1d9a..001f68f0bb2 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
103 ide_finish_cmd(drive, cmd, stat); 103 ide_finish_cmd(drive, cmd, stat);
104 else 104 else
105 ide_complete_rq(drive, 0, 105 ide_complete_rq(drive, 0,
106 cmd->rq->nr_sectors << 9); 106 blk_rq_sectors(cmd->rq) << 9);
107 return ide_stopped; 107 return ide_stopped;
108 } 108 }
109 printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n", 109 printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
@@ -510,23 +510,11 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
510 /* 510 /*
511 * un-busy drive etc and make sure request is sane 511 * un-busy drive etc and make sure request is sane
512 */ 512 */
513
514 rq = hwif->rq; 513 rq = hwif->rq;
515 if (!rq) 514 if (rq) {
516 goto out; 515 hwif->rq = NULL;
517 516 rq->errors = 0;
518 hwif->rq = NULL; 517 }
519
520 rq->errors = 0;
521
522 if (!rq->bio)
523 goto out;
524
525 rq->sector = rq->bio->bi_sector;
526 rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
527 rq->hard_cur_sectors = rq->current_nr_sectors;
528 rq->buffer = bio_data(rq->bio);
529out:
530 return ret; 518 return ret;
531} 519}
532 520
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 2b4868d95f8..650981758f1 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -134,13 +134,17 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
134 drive->pc = pc; 134 drive->pc = pc;
135 135
136 if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) { 136 if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
137 unsigned int done = blk_rq_bytes(drive->hwif->rq);
138
137 if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR)) 139 if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
138 ide_floppy_report_error(floppy, pc); 140 ide_floppy_report_error(floppy, pc);
141
139 /* Giving up */ 142 /* Giving up */
140 pc->error = IDE_DRV_ERROR_GENERAL; 143 pc->error = IDE_DRV_ERROR_GENERAL;
141 144
142 drive->failed_pc = NULL; 145 drive->failed_pc = NULL;
143 drive->pc_callback(drive, 0); 146 drive->pc_callback(drive, 0);
147 ide_complete_rq(drive, -EIO, done);
144 return ide_stopped; 148 return ide_stopped;
145 } 149 }
146 150
@@ -190,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
190{ 194{
191 struct ide_disk_obj *floppy = drive->driver_data; 195 struct ide_disk_obj *floppy = drive->driver_data;
192 int block = sector / floppy->bs_factor; 196 int block = sector / floppy->bs_factor;
193 int blocks = rq->nr_sectors / floppy->bs_factor; 197 int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
194 int cmd = rq_data_dir(rq); 198 int cmd = rq_data_dir(rq);
195 199
196 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks); 200 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@@ -216,16 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
216 ide_init_pc(pc); 220 ide_init_pc(pc);
217 memcpy(pc->c, rq->cmd, sizeof(pc->c)); 221 memcpy(pc->c, rq->cmd, sizeof(pc->c));
218 pc->rq = rq; 222 pc->rq = rq;
219 if (rq->data_len && rq_data_dir(rq) == WRITE) 223 if (blk_rq_bytes(rq)) {
220 pc->flags |= PC_FLAG_WRITING;
221 pc->buf = rq->data;
222 if (rq->bio)
223 pc->flags |= PC_FLAG_DMA_OK; 224 pc->flags |= PC_FLAG_DMA_OK;
224 /* 225 if (rq_data_dir(rq) == WRITE)
225 * possibly problematic, doesn't look like ide-floppy correctly 226 pc->flags |= PC_FLAG_WRITING;
226 * handled scattered requests if dma fails... 227 }
227 */ 228 /* pio will be performed by ide_pio_bytes() which handles sg fine */
228 pc->req_xfer = pc->buf_size = rq->data_len; 229 pc->buf = NULL;
230 pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
229} 231}
230 232
231static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, 233static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
@@ -257,16 +259,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
257 goto out_end; 259 goto out_end;
258 } 260 }
259 if (blk_fs_request(rq)) { 261 if (blk_fs_request(rq)) {
260 if (((long)rq->sector % floppy->bs_factor) || 262 if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
261 (rq->nr_sectors % floppy->bs_factor)) { 263 (blk_rq_sectors(rq) % floppy->bs_factor)) {
262 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", 264 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
263 drive->name); 265 drive->name);
264 goto out_end; 266 goto out_end;
265 } 267 }
266 pc = &floppy->queued_pc; 268 pc = &floppy->queued_pc;
267 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); 269 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
268 } else if (blk_special_request(rq)) { 270 } else if (blk_special_request(rq) || blk_sense_request(rq)) {
269 pc = (struct ide_atapi_pc *) rq->buffer; 271 pc = (struct ide_atapi_pc *)rq->special;
270 } else if (blk_pc_request(rq)) { 272 } else if (blk_pc_request(rq)) {
271 pc = &floppy->queued_pc; 273 pc = &floppy->queued_pc;
272 idefloppy_blockpc_cmd(floppy, pc, rq); 274 idefloppy_blockpc_cmd(floppy, pc, rq);
@@ -275,6 +277,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
275 goto out_end; 277 goto out_end;
276 } 278 }
277 279
280 ide_prep_sense(drive, rq);
281
278 memset(&cmd, 0, sizeof(cmd)); 282 memset(&cmd, 0, sizeof(cmd));
279 283
280 if (rq_data_dir(rq)) 284 if (rq_data_dir(rq))
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6415a2e2ba8..bba4297f2f0 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
116unsigned int ide_rq_bytes(struct request *rq) 116unsigned int ide_rq_bytes(struct request *rq)
117{ 117{
118 if (blk_pc_request(rq)) 118 if (blk_pc_request(rq))
119 return rq->data_len; 119 return blk_rq_bytes(rq);
120 else 120 else
121 return rq->hard_cur_sectors << 9; 121 return blk_rq_cur_sectors(rq) << 9;
122} 122}
123EXPORT_SYMBOL_GPL(ide_rq_bytes); 123EXPORT_SYMBOL_GPL(ide_rq_bytes);
124 124
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
133 * and complete the whole request right now 133 * and complete the whole request right now
134 */ 134 */
135 if (blk_noretry_request(rq) && error <= 0) 135 if (blk_noretry_request(rq) && error <= 0)
136 nr_bytes = rq->hard_nr_sectors << 9; 136 nr_bytes = blk_rq_sectors(rq) << 9;
137 137
138 rc = ide_end_rq(drive, rq, error, nr_bytes); 138 rc = ide_end_rq(drive, rq, error, nr_bytes);
139 if (rc == 0) 139 if (rc == 0)
@@ -248,14 +248,7 @@ void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
248 struct scatterlist *sg = hwif->sg_table; 248 struct scatterlist *sg = hwif->sg_table;
249 struct request *rq = cmd->rq; 249 struct request *rq = cmd->rq;
250 250
251 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 251 cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
252 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
253 cmd->sg_nents = 1;
254 } else if (!rq->bio) {
255 sg_init_one(sg, rq->data, rq->data_len);
256 cmd->sg_nents = 1;
257 } else
258 cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
259} 252}
260EXPORT_SYMBOL_GPL(ide_map_sg); 253EXPORT_SYMBOL_GPL(ide_map_sg);
261 254
@@ -286,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
286 279
287 if (cmd) { 280 if (cmd) {
288 if (cmd->protocol == ATA_PROT_PIO) { 281 if (cmd->protocol == ATA_PROT_PIO) {
289 ide_init_sg_cmd(cmd, rq->nr_sectors << 9); 282 ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
290 ide_map_sg(drive, cmd); 283 ide_map_sg(drive, cmd);
291 } 284 }
292 285
@@ -371,7 +364,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
371 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 364 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
372 return execute_drive_cmd(drive, rq); 365 return execute_drive_cmd(drive, rq);
373 else if (blk_pm_request(rq)) { 366 else if (blk_pm_request(rq)) {
374 struct request_pm_state *pm = rq->data; 367 struct request_pm_state *pm = rq->special;
375#ifdef DEBUG_PM 368#ifdef DEBUG_PM
376 printk("%s: start_power_step(step: %d)\n", 369 printk("%s: start_power_step(step: %d)\n",
377 drive->name, pm->pm_step); 370 drive->name, pm->pm_step);
@@ -394,7 +387,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
394 387
395 drv = *(struct ide_driver **)rq->rq_disk->private_data; 388 drv = *(struct ide_driver **)rq->rq_disk->private_data;
396 389
397 return drv->do_request(drive, rq, rq->sector); 390 return drv->do_request(drive, rq, blk_rq_pos(rq));
398 } 391 }
399 return do_special(drive); 392 return do_special(drive);
400kill_rq: 393kill_rq:
@@ -484,6 +477,9 @@ void do_ide_request(struct request_queue *q)
484 477
485 spin_unlock_irq(q->queue_lock); 478 spin_unlock_irq(q->queue_lock);
486 479
480 /* HLD do_request() callback might sleep, make sure it's okay */
481 might_sleep();
482
487 if (ide_lock_host(host, hwif)) 483 if (ide_lock_host(host, hwif))
488 goto plug_device_2; 484 goto plug_device_2;
489 485
@@ -491,10 +487,10 @@ void do_ide_request(struct request_queue *q)
491 487
492 if (!ide_lock_port(hwif)) { 488 if (!ide_lock_port(hwif)) {
493 ide_hwif_t *prev_port; 489 ide_hwif_t *prev_port;
490
491 WARN_ON_ONCE(hwif->rq);
494repeat: 492repeat:
495 prev_port = hwif->host->cur_port; 493 prev_port = hwif->host->cur_port;
496 hwif->rq = NULL;
497
498 if (drive->dev_flags & IDE_DFLAG_SLEEPING && 494 if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
499 time_after(drive->sleep, jiffies)) { 495 time_after(drive->sleep, jiffies)) {
500 ide_unlock_port(hwif); 496 ide_unlock_port(hwif);
@@ -523,7 +519,9 @@ repeat:
523 * we know that the queue isn't empty, but this can happen 519 * we know that the queue isn't empty, but this can happen
524 * if the q->prep_rq_fn() decides to kill a request 520 * if the q->prep_rq_fn() decides to kill a request
525 */ 521 */
526 rq = elv_next_request(drive->queue); 522 if (!rq)
523 rq = blk_fetch_request(drive->queue);
524
527 spin_unlock_irq(q->queue_lock); 525 spin_unlock_irq(q->queue_lock);
528 spin_lock_irq(&hwif->lock); 526 spin_lock_irq(&hwif->lock);
529 527
@@ -535,7 +533,7 @@ repeat:
535 /* 533 /*
536 * Sanity: don't accept a request that isn't a PM request 534 * Sanity: don't accept a request that isn't a PM request
537 * if we are currently power managed. This is very important as 535 * if we are currently power managed. This is very important as
538 * blk_stop_queue() doesn't prevent the elv_next_request() 536 * blk_stop_queue() doesn't prevent the blk_fetch_request()
539 * above to return us whatever is in the queue. Since we call 537 * above to return us whatever is in the queue. Since we call
540 * ide_do_request() ourselves, we end up taking requests while 538 * ide_do_request() ourselves, we end up taking requests while
541 * the queue is blocked... 539 * the queue is blocked...
@@ -559,8 +557,11 @@ repeat:
559 startstop = start_request(drive, rq); 557 startstop = start_request(drive, rq);
560 spin_lock_irq(&hwif->lock); 558 spin_lock_irq(&hwif->lock);
561 559
562 if (startstop == ide_stopped) 560 if (startstop == ide_stopped) {
561 rq = hwif->rq;
562 hwif->rq = NULL;
563 goto repeat; 563 goto repeat;
564 }
564 } else 565 } else
565 goto plug_device; 566 goto plug_device;
566out: 567out:
@@ -576,18 +577,24 @@ plug_device:
576plug_device_2: 577plug_device_2:
577 spin_lock_irq(q->queue_lock); 578 spin_lock_irq(q->queue_lock);
578 579
580 if (rq)
581 blk_requeue_request(q, rq);
579 if (!elv_queue_empty(q)) 582 if (!elv_queue_empty(q))
580 blk_plug_device(q); 583 blk_plug_device(q);
581} 584}
582 585
583static void ide_plug_device(ide_drive_t *drive) 586static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
584{ 587{
585 struct request_queue *q = drive->queue; 588 struct request_queue *q = drive->queue;
586 unsigned long flags; 589 unsigned long flags;
587 590
588 spin_lock_irqsave(q->queue_lock, flags); 591 spin_lock_irqsave(q->queue_lock, flags);
592
593 if (rq)
594 blk_requeue_request(q, rq);
589 if (!elv_queue_empty(q)) 595 if (!elv_queue_empty(q))
590 blk_plug_device(q); 596 blk_plug_device(q);
597
591 spin_unlock_irqrestore(q->queue_lock, flags); 598 spin_unlock_irqrestore(q->queue_lock, flags);
592} 599}
593 600
@@ -636,6 +643,7 @@ void ide_timer_expiry (unsigned long data)
636 unsigned long flags; 643 unsigned long flags;
637 int wait = -1; 644 int wait = -1;
638 int plug_device = 0; 645 int plug_device = 0;
646 struct request *uninitialized_var(rq_in_flight);
639 647
640 spin_lock_irqsave(&hwif->lock, flags); 648 spin_lock_irqsave(&hwif->lock, flags);
641 649
@@ -697,6 +705,8 @@ void ide_timer_expiry (unsigned long data)
697 spin_lock_irq(&hwif->lock); 705 spin_lock_irq(&hwif->lock);
698 enable_irq(hwif->irq); 706 enable_irq(hwif->irq);
699 if (startstop == ide_stopped && hwif->polling == 0) { 707 if (startstop == ide_stopped && hwif->polling == 0) {
708 rq_in_flight = hwif->rq;
709 hwif->rq = NULL;
700 ide_unlock_port(hwif); 710 ide_unlock_port(hwif);
701 plug_device = 1; 711 plug_device = 1;
702 } 712 }
@@ -705,7 +715,7 @@ void ide_timer_expiry (unsigned long data)
705 715
706 if (plug_device) { 716 if (plug_device) {
707 ide_unlock_host(hwif->host); 717 ide_unlock_host(hwif->host);
708 ide_plug_device(drive); 718 ide_requeue_and_plug(drive, rq_in_flight);
709 } 719 }
710} 720}
711 721
@@ -791,6 +801,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
791 ide_startstop_t startstop; 801 ide_startstop_t startstop;
792 irqreturn_t irq_ret = IRQ_NONE; 802 irqreturn_t irq_ret = IRQ_NONE;
793 int plug_device = 0; 803 int plug_device = 0;
804 struct request *uninitialized_var(rq_in_flight);
794 805
795 if (host->host_flags & IDE_HFLAG_SERIALIZE) { 806 if (host->host_flags & IDE_HFLAG_SERIALIZE) {
796 if (hwif != host->cur_port) 807 if (hwif != host->cur_port)
@@ -870,6 +881,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
870 */ 881 */
871 if (startstop == ide_stopped && hwif->polling == 0) { 882 if (startstop == ide_stopped && hwif->polling == 0) {
872 BUG_ON(hwif->handler); 883 BUG_ON(hwif->handler);
884 rq_in_flight = hwif->rq;
885 hwif->rq = NULL;
873 ide_unlock_port(hwif); 886 ide_unlock_port(hwif);
874 plug_device = 1; 887 plug_device = 1;
875 } 888 }
@@ -879,7 +892,7 @@ out:
879out_early: 892out_early:
880 if (plug_device) { 893 if (plug_device) {
881 ide_unlock_host(hwif->host); 894 ide_unlock_host(hwif->host);
882 ide_plug_device(drive); 895 ide_requeue_and_plug(drive, rq_in_flight);
883 } 896 }
884 897
885 return irq_ret; 898 return irq_ret;
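The ide-io.c hunks above pull the in-flight request out of hwif->rq before the port is unlocked, so the timeout and IRQ error paths can hand it back to the block layer instead of losing it. The helper they converge on, ide_requeue_and_plug(), boils down to the following pattern (a sketch with an invented name, assuming the caller holds no queue lock):

static void requeue_and_plug(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);

	/* hand the dequeued-but-unfinished request back to the queue */
	if (rq)
		blk_requeue_request(q, rq);
	if (!elv_queue_empty(q))
		blk_plug_device(q);

	spin_unlock_irqrestore(q->queue_lock, flags);
}

blk_requeue_request() and blk_plug_device() both expect q->queue_lock to be held, which is why the whole body sits inside the irqsave/irqrestore pair.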
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index c1c25ebbaa1..5991b23793f 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -231,7 +231,6 @@ static int generic_drive_reset(ide_drive_t *drive)
231 rq->cmd_type = REQ_TYPE_SPECIAL; 231 rq->cmd_type = REQ_TYPE_SPECIAL;
232 rq->cmd_len = 1; 232 rq->cmd_len = 1;
233 rq->cmd[0] = REQ_DRIVE_RESET; 233 rq->cmd[0] = REQ_DRIVE_RESET;
234 rq->cmd_flags |= REQ_SOFTBARRIER;
235 if (blk_execute_rq(drive->queue, NULL, rq, 1)) 234 if (blk_execute_rq(drive->queue, NULL, rq, 1))
236 ret = rq->errors; 235 ret = rq->errors;
237 blk_put_request(rq); 236 blk_put_request(rq);
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 2148df836ce..e386a32dc9b 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -96,7 +96,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
96 96
97 if (rq) 97 if (rq)
98 printk(KERN_CONT ", sector=%llu", 98 printk(KERN_CONT ", sector=%llu",
99 (unsigned long long)rq->sector); 99 (unsigned long long)blk_rq_pos(rq));
100 } 100 }
101 printk(KERN_CONT "\n"); 101 printk(KERN_CONT "\n");
102} 102}
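The one-liner above is typical of the mechanical half of this series: direct reads of struct request fields give way to the accessor helpers. A hedged before/after illustration (the extra fields in the second printk are only there to show the neighbouring accessors, not part of ide-lib.c):

/* old style: poke at the request fields directly */
printk(KERN_CONT ", sector=%llu", (unsigned long long)rq->sector);

/* new style: go through the block-layer accessors */
printk(KERN_CONT ", sector=%llu sectors=%u bytes=%u",
       (unsigned long long)blk_rq_pos(rq),
       blk_rq_sectors(rq), blk_rq_bytes(rq));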
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 310d03f2b5b..a914023d6d0 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -24,11 +24,8 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
24 start_queue = 1; 24 start_queue = 1;
25 spin_unlock_irq(&hwif->lock); 25 spin_unlock_irq(&hwif->lock);
26 26
27 if (start_queue) { 27 if (start_queue)
28 spin_lock_irq(q->queue_lock); 28 blk_run_queue(q);
29 blk_start_queueing(q);
30 spin_unlock_irq(q->queue_lock);
31 }
32 return; 29 return;
33 } 30 }
34 spin_unlock_irq(&hwif->lock); 31 spin_unlock_irq(&hwif->lock);
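blk_start_queueing() required the caller to hold q->queue_lock, hence the lock/unlock pair the old ide-park.c code carried; blk_run_queue() takes the lock itself, so the call site shrinks to a single line. In sketch form:

/* old: caller supplies the queue lock */
spin_lock_irq(q->queue_lock);
blk_start_queueing(q);
spin_unlock_irq(q->queue_lock);

/* new: blk_run_queue() acquires q->queue_lock internally */
blk_run_queue(q);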
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 0d8a151c0a0..ba1488bd843 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -7,7 +7,6 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
7 ide_hwif_t *hwif = drive->hwif; 7 ide_hwif_t *hwif = drive->hwif;
8 struct request *rq; 8 struct request *rq;
9 struct request_pm_state rqpm; 9 struct request_pm_state rqpm;
10 struct ide_cmd cmd;
11 int ret; 10 int ret;
12 11
13 /* call ACPI _GTM only once */ 12 /* call ACPI _GTM only once */
@@ -15,11 +14,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
15 ide_acpi_get_timing(hwif); 14 ide_acpi_get_timing(hwif);
16 15
17 memset(&rqpm, 0, sizeof(rqpm)); 16 memset(&rqpm, 0, sizeof(rqpm));
18 memset(&cmd, 0, sizeof(cmd));
19 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 17 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
20 rq->cmd_type = REQ_TYPE_PM_SUSPEND; 18 rq->cmd_type = REQ_TYPE_PM_SUSPEND;
21 rq->special = &cmd; 19 rq->special = &rqpm;
22 rq->data = &rqpm;
23 rqpm.pm_step = IDE_PM_START_SUSPEND; 20 rqpm.pm_step = IDE_PM_START_SUSPEND;
24 if (mesg.event == PM_EVENT_PRETHAW) 21 if (mesg.event == PM_EVENT_PRETHAW)
25 mesg.event = PM_EVENT_FREEZE; 22 mesg.event = PM_EVENT_FREEZE;
@@ -41,7 +38,6 @@ int generic_ide_resume(struct device *dev)
41 ide_hwif_t *hwif = drive->hwif; 38 ide_hwif_t *hwif = drive->hwif;
42 struct request *rq; 39 struct request *rq;
43 struct request_pm_state rqpm; 40 struct request_pm_state rqpm;
44 struct ide_cmd cmd;
45 int err; 41 int err;
46 42
47 /* call ACPI _PS0 / _STM only once */ 43 /* call ACPI _PS0 / _STM only once */
@@ -53,12 +49,10 @@ int generic_ide_resume(struct device *dev)
53 ide_acpi_exec_tfs(drive); 49 ide_acpi_exec_tfs(drive);
54 50
55 memset(&rqpm, 0, sizeof(rqpm)); 51 memset(&rqpm, 0, sizeof(rqpm));
56 memset(&cmd, 0, sizeof(cmd));
57 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 52 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
58 rq->cmd_type = REQ_TYPE_PM_RESUME; 53 rq->cmd_type = REQ_TYPE_PM_RESUME;
59 rq->cmd_flags |= REQ_PREEMPT; 54 rq->cmd_flags |= REQ_PREEMPT;
60 rq->special = &cmd; 55 rq->special = &rqpm;
61 rq->data = &rqpm;
62 rqpm.pm_step = IDE_PM_START_RESUME; 56 rqpm.pm_step = IDE_PM_START_RESUME;
63 rqpm.pm_state = PM_EVENT_ON; 57 rqpm.pm_state = PM_EVENT_ON;
64 58
@@ -77,7 +71,7 @@ int generic_ide_resume(struct device *dev)
77 71
78void ide_complete_power_step(ide_drive_t *drive, struct request *rq) 72void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
79{ 73{
80 struct request_pm_state *pm = rq->data; 74 struct request_pm_state *pm = rq->special;
81 75
82#ifdef DEBUG_PM 76#ifdef DEBUG_PM
83 printk(KERN_INFO "%s: complete_power_step(step: %d)\n", 77 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
@@ -107,10 +101,8 @@ void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
107 101
108ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) 102ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
109{ 103{
110 struct request_pm_state *pm = rq->data; 104 struct request_pm_state *pm = rq->special;
111 struct ide_cmd *cmd = rq->special; 105 struct ide_cmd cmd = { };
112
113 memset(cmd, 0, sizeof(*cmd));
114 106
115 switch (pm->pm_step) { 107 switch (pm->pm_step) {
116 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */ 108 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
@@ -123,12 +115,12 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
123 return ide_stopped; 115 return ide_stopped;
124 } 116 }
125 if (ata_id_flush_ext_enabled(drive->id)) 117 if (ata_id_flush_ext_enabled(drive->id))
126 cmd->tf.command = ATA_CMD_FLUSH_EXT; 118 cmd.tf.command = ATA_CMD_FLUSH_EXT;
127 else 119 else
128 cmd->tf.command = ATA_CMD_FLUSH; 120 cmd.tf.command = ATA_CMD_FLUSH;
129 goto out_do_tf; 121 goto out_do_tf;
130 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */ 122 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
131 cmd->tf.command = ATA_CMD_STANDBYNOW1; 123 cmd.tf.command = ATA_CMD_STANDBYNOW1;
132 goto out_do_tf; 124 goto out_do_tf;
133 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */ 125 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
134 ide_set_max_pio(drive); 126 ide_set_max_pio(drive);
@@ -141,7 +133,7 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
141 ide_complete_power_step(drive, rq); 133 ide_complete_power_step(drive, rq);
142 return ide_stopped; 134 return ide_stopped;
143 case IDE_PM_IDLE: /* Resume step 2 (idle) */ 135 case IDE_PM_IDLE: /* Resume step 2 (idle) */
144 cmd->tf.command = ATA_CMD_IDLEIMMEDIATE; 136 cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
145 goto out_do_tf; 137 goto out_do_tf;
146 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */ 138 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
147 /* 139 /*
@@ -163,11 +155,11 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
163 return ide_stopped; 155 return ide_stopped;
164 156
165out_do_tf: 157out_do_tf:
166 cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; 158 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
167 cmd->valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; 159 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
168 cmd->protocol = ATA_PROT_NODATA; 160 cmd.protocol = ATA_PROT_NODATA;
169 161
170 return do_rw_taskfile(drive, cmd); 162 return do_rw_taskfile(drive, &cmd);
171} 163}
172 164
173/** 165/**
@@ -181,7 +173,7 @@ out_do_tf:
181void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) 173void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
182{ 174{
183 struct request_queue *q = drive->queue; 175 struct request_queue *q = drive->queue;
184 struct request_pm_state *pm = rq->data; 176 struct request_pm_state *pm = rq->special;
185 unsigned long flags; 177 unsigned long flags;
186 178
187 ide_complete_power_step(drive, rq); 179 ide_complete_power_step(drive, rq);
@@ -207,7 +199,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
207 199
208void ide_check_pm_state(ide_drive_t *drive, struct request *rq) 200void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
209{ 201{
210 struct request_pm_state *pm = rq->data; 202 struct request_pm_state *pm = rq->special;
211 203
212 if (blk_pm_suspend_request(rq) && 204 if (blk_pm_suspend_request(rq) &&
213 pm->pm_step == IDE_PM_START_SUSPEND) 205 pm->pm_step == IDE_PM_START_SUSPEND)
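With rq->data removed, ide-pm.c now passes the request_pm_state through rq->special, and the ide_cmd is rebuilt on the stack of ide_start_power_step() for each step rather than being carried along with the request. The submit side reduces to roughly the following (a sketch of the suspend path; rqpm field setup beyond pm_step is omitted):

struct request_pm_state rqpm;
struct request *rq;
int ret;

memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_SUSPEND;
rq->special = &rqpm;		/* PM state rides in ->special, not ->data */
rqpm.pm_step = IDE_PM_START_SUSPEND;

ret = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);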
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 3a53e0834cf..683ff37d407 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -131,13 +131,6 @@ enum {
131 IDETAPE_DIR_WRITE = (1 << 2), 131 IDETAPE_DIR_WRITE = (1 << 2),
132}; 132};
133 133
134struct idetape_bh {
135 u32 b_size;
136 atomic_t b_count;
137 struct idetape_bh *b_reqnext;
138 char *b_data;
139};
140
141/* Tape door status */ 134/* Tape door status */
142#define DOOR_UNLOCKED 0 135#define DOOR_UNLOCKED 0
143#define DOOR_LOCKED 1 136#define DOOR_LOCKED 1
@@ -219,18 +212,12 @@ typedef struct ide_tape_obj {
219 212
220 /* Data buffer size chosen based on the tape's recommendation */ 213 /* Data buffer size chosen based on the tape's recommendation */
221 int buffer_size; 214 int buffer_size;
222 /* merge buffer */ 215 /* Staging buffer of buffer_size bytes */
223 struct idetape_bh *merge_bh; 216 void *buf;
224 /* size of the merge buffer */ 217 /* The read/write cursor */
225 int merge_bh_size; 218 void *cur;
226 /* pointer to current buffer head within the merge buffer */ 219 /* The number of valid bytes in buf */
227 struct idetape_bh *bh; 220 size_t valid;
228 char *b_data;
229 int b_count;
230
231 int pages_per_buffer;
232 /* Wasted space in each stage */
233 int excess_bh_size;
234 221
235 /* Measures average tape speed */ 222 /* Measures average tape speed */
236 unsigned long avg_time; 223 unsigned long avg_time;
@@ -297,84 +284,6 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
297 return tape; 284 return tape;
298} 285}
299 286
300static int idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
301 unsigned int bcount)
302{
303 struct idetape_bh *bh = pc->bh;
304 int count;
305
306 while (bcount) {
307 if (bh == NULL)
308 break;
309 count = min(
310 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
311 bcount);
312 drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
313 atomic_read(&bh->b_count), count);
314 bcount -= count;
315 atomic_add(count, &bh->b_count);
316 if (atomic_read(&bh->b_count) == bh->b_size) {
317 bh = bh->b_reqnext;
318 if (bh)
319 atomic_set(&bh->b_count, 0);
320 }
321 }
322
323 pc->bh = bh;
324
325 return bcount;
326}
327
328static int idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
329 unsigned int bcount)
330{
331 struct idetape_bh *bh = pc->bh;
332 int count;
333
334 while (bcount) {
335 if (bh == NULL)
336 break;
337 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
338 drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
339 bcount -= count;
340 pc->b_data += count;
341 pc->b_count -= count;
342 if (!pc->b_count) {
343 bh = bh->b_reqnext;
344 pc->bh = bh;
345 if (bh) {
346 pc->b_data = bh->b_data;
347 pc->b_count = atomic_read(&bh->b_count);
348 }
349 }
350 }
351
352 return bcount;
353}
354
355static void idetape_update_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc)
356{
357 struct idetape_bh *bh = pc->bh;
358 int count;
359 unsigned int bcount = pc->xferred;
360
361 if (pc->flags & PC_FLAG_WRITING)
362 return;
363 while (bcount) {
364 if (bh == NULL) {
365 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
366 __func__);
367 return;
368 }
369 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
370 atomic_set(&bh->b_count, count);
371 if (atomic_read(&bh->b_count) == bh->b_size)
372 bh = bh->b_reqnext;
373 bcount -= count;
374 }
375 pc->bh = bh;
376}
377
378/* 287/*
379 * called on each failed packet command retry to analyze the request sense. We 288 * called on each failed packet command retry to analyze the request sense. We
380 * currently do not utilize this information. 289 * currently do not utilize this information.
@@ -392,12 +301,10 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
392 pc->c[0], tape->sense_key, tape->asc, tape->ascq); 301 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
393 302
394 /* Correct pc->xferred by asking the tape. */ 303 /* Correct pc->xferred by asking the tape. */
395 if (pc->flags & PC_FLAG_DMA_ERROR) { 304 if (pc->flags & PC_FLAG_DMA_ERROR)
396 pc->xferred = pc->req_xfer - 305 pc->xferred = pc->req_xfer -
397 tape->blk_size * 306 tape->blk_size *
398 get_unaligned_be32(&sense[3]); 307 get_unaligned_be32(&sense[3]);
399 idetape_update_buffers(drive, pc);
400 }
401 308
402 /* 309 /*
403 * If error was the result of a zero-length read or write command, 310 * If error was the result of a zero-length read or write command,
@@ -436,29 +343,6 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
436 } 343 }
437} 344}
438 345
439/* Free data buffers completely. */
440static void ide_tape_kfree_buffer(idetape_tape_t *tape)
441{
442 struct idetape_bh *prev_bh, *bh = tape->merge_bh;
443
444 while (bh) {
445 u32 size = bh->b_size;
446
447 while (size) {
448 unsigned int order = fls(size >> PAGE_SHIFT)-1;
449
450 if (bh->b_data)
451 free_pages((unsigned long)bh->b_data, order);
452
453 size &= (order-1);
454 bh->b_data += (1 << order) * PAGE_SIZE;
455 }
456 prev_bh = bh;
457 bh = bh->b_reqnext;
458 kfree(prev_bh);
459 }
460}
461
462static void ide_tape_handle_dsc(ide_drive_t *); 346static void ide_tape_handle_dsc(ide_drive_t *);
463 347
464static int ide_tape_callback(ide_drive_t *drive, int dsc) 348static int ide_tape_callback(ide_drive_t *drive, int dsc)
@@ -496,7 +380,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
496 } 380 }
497 381
498 tape->first_frame += blocks; 382 tape->first_frame += blocks;
499 rq->current_nr_sectors -= blocks; 383 rq->resid_len -= blocks * tape->blk_size;
500 384
501 if (pc->error) { 385 if (pc->error) {
502 uptodate = 0; 386 uptodate = 0;
@@ -558,19 +442,6 @@ static void ide_tape_handle_dsc(ide_drive_t *drive)
558 idetape_postpone_request(drive); 442 idetape_postpone_request(drive);
559} 443}
560 444
561static int ide_tape_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
562 unsigned int bcount, int write)
563{
564 unsigned int bleft;
565
566 if (write)
567 bleft = idetape_output_buffers(drive, pc, bcount);
568 else
569 bleft = idetape_input_buffers(drive, pc, bcount);
570
571 return bcount - bleft;
572}
573
574/* 445/*
575 * Packet Command Interface 446 * Packet Command Interface
576 * 447 *
@@ -622,6 +493,8 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
622 493
623 if (pc->retries > IDETAPE_MAX_PC_RETRIES || 494 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
624 (pc->flags & PC_FLAG_ABORT)) { 495 (pc->flags & PC_FLAG_ABORT)) {
496 unsigned int done = blk_rq_bytes(drive->hwif->rq);
497
625 /* 498 /*
626 * We will "abort" retrying a packet command in case legitimate 499 * We will "abort" retrying a packet command in case legitimate
627 * error code was received (crossing a filemark, or end of the 500 * error code was received (crossing a filemark, or end of the
@@ -641,8 +514,10 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
641 /* Giving up */ 514 /* Giving up */
642 pc->error = IDE_DRV_ERROR_GENERAL; 515 pc->error = IDE_DRV_ERROR_GENERAL;
643 } 516 }
517
644 drive->failed_pc = NULL; 518 drive->failed_pc = NULL;
645 drive->pc_callback(drive, 0); 519 drive->pc_callback(drive, 0);
520 ide_complete_rq(drive, -EIO, done);
646 return ide_stopped; 521 return ide_stopped;
647 } 522 }
648 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]); 523 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -695,7 +570,7 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
695 printk(KERN_ERR "ide-tape: %s: I/O error, ", 570 printk(KERN_ERR "ide-tape: %s: I/O error, ",
696 tape->name); 571 tape->name);
697 /* Retry operation */ 572 /* Retry operation */
698 ide_retry_pc(drive, tape->disk); 573 ide_retry_pc(drive);
699 return ide_stopped; 574 return ide_stopped;
700 } 575 }
701 pc->error = 0; 576 pc->error = 0;
@@ -711,27 +586,22 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
711 struct ide_atapi_pc *pc, struct request *rq, 586 struct ide_atapi_pc *pc, struct request *rq,
712 u8 opcode) 587 u8 opcode)
713{ 588{
714 struct idetape_bh *bh = (struct idetape_bh *)rq->special; 589 unsigned int length = blk_rq_sectors(rq);
715 unsigned int length = rq->current_nr_sectors;
716 590
717 ide_init_pc(pc); 591 ide_init_pc(pc);
718 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); 592 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
719 pc->c[1] = 1; 593 pc->c[1] = 1;
720 pc->bh = bh;
721 pc->buf = NULL; 594 pc->buf = NULL;
722 pc->buf_size = length * tape->blk_size; 595 pc->buf_size = length * tape->blk_size;
723 pc->req_xfer = pc->buf_size; 596 pc->req_xfer = pc->buf_size;
724 if (pc->req_xfer == tape->buffer_size) 597 if (pc->req_xfer == tape->buffer_size)
725 pc->flags |= PC_FLAG_DMA_OK; 598 pc->flags |= PC_FLAG_DMA_OK;
726 599
727 if (opcode == READ_6) { 600 if (opcode == READ_6)
728 pc->c[0] = READ_6; 601 pc->c[0] = READ_6;
729 atomic_set(&bh->b_count, 0); 602 else if (opcode == WRITE_6) {
730 } else if (opcode == WRITE_6) {
731 pc->c[0] = WRITE_6; 603 pc->c[0] = WRITE_6;
732 pc->flags |= PC_FLAG_WRITING; 604 pc->flags |= PC_FLAG_WRITING;
733 pc->b_data = bh->b_data;
734 pc->b_count = atomic_read(&bh->b_count);
735 } 605 }
736 606
737 memcpy(rq->cmd, pc->c, 12); 607 memcpy(rq->cmd, pc->c, 12);
@@ -747,12 +617,10 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
747 struct ide_cmd cmd; 617 struct ide_cmd cmd;
748 u8 stat; 618 u8 stat;
749 619
 750 	debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu," 620 	debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n",
 751 		" current_nr_sectors: %u\n", 621 		  (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
752 (unsigned long long)rq->sector, rq->nr_sectors,
753 rq->current_nr_sectors);
754 622
755 if (!blk_special_request(rq)) { 623 if (!(blk_special_request(rq) || blk_sense_request(rq))) {
756 /* We do not support buffer cache originated requests. */ 624 /* We do not support buffer cache originated requests. */
757 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in " 625 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
758 "request queue (%d)\n", drive->name, rq->cmd_type); 626 "request queue (%d)\n", drive->name, rq->cmd_type);
@@ -828,7 +696,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
828 goto out; 696 goto out;
829 } 697 }
830 if (rq->cmd[13] & REQ_IDETAPE_PC1) { 698 if (rq->cmd[13] & REQ_IDETAPE_PC1) {
831 pc = (struct ide_atapi_pc *) rq->buffer; 699 pc = (struct ide_atapi_pc *)rq->special;
832 rq->cmd[13] &= ~(REQ_IDETAPE_PC1); 700 rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
833 rq->cmd[13] |= REQ_IDETAPE_PC2; 701 rq->cmd[13] |= REQ_IDETAPE_PC2;
834 goto out; 702 goto out;
@@ -840,6 +708,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
840 BUG(); 708 BUG();
841 709
842out: 710out:
711 /* prepare sense request for this command */
712 ide_prep_sense(drive, rq);
713
843 memset(&cmd, 0, sizeof(cmd)); 714 memset(&cmd, 0, sizeof(cmd));
844 715
845 if (rq_data_dir(rq)) 716 if (rq_data_dir(rq))
@@ -847,167 +718,10 @@ out:
847 718
848 cmd.rq = rq; 719 cmd.rq = rq;
849 720
850 return ide_tape_issue_pc(drive, &cmd, pc); 721 ide_init_sg_cmd(&cmd, pc->req_xfer);
851} 722 ide_map_sg(drive, &cmd);
852
853/*
854 * The function below uses __get_free_pages to allocate a data buffer of size
855 * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
856 * much as possible.
857 *
858 * It returns a pointer to the newly allocated buffer, or NULL in case of
859 * failure.
860 */
861static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
862 int full, int clear)
863{
864 struct idetape_bh *prev_bh, *bh, *merge_bh;
865 int pages = tape->pages_per_buffer;
866 unsigned int order, b_allocd;
867 char *b_data = NULL;
868
869 merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
870 bh = merge_bh;
871 if (bh == NULL)
872 goto abort;
873
874 order = fls(pages) - 1;
875 bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
876 if (!bh->b_data)
877 goto abort;
878 b_allocd = (1 << order) * PAGE_SIZE;
879 pages &= (order-1);
880
881 if (clear)
882 memset(bh->b_data, 0, b_allocd);
883 bh->b_reqnext = NULL;
884 bh->b_size = b_allocd;
885 atomic_set(&bh->b_count, full ? bh->b_size : 0);
886
887 while (pages) {
888 order = fls(pages) - 1;
889 b_data = (char *) __get_free_pages(GFP_KERNEL, order);
890 if (!b_data)
891 goto abort;
892 b_allocd = (1 << order) * PAGE_SIZE;
893
894 if (clear)
895 memset(b_data, 0, b_allocd);
896
897 /* newly allocated page frames below buffer header or ...*/
898 if (bh->b_data == b_data + b_allocd) {
899 bh->b_size += b_allocd;
900 bh->b_data -= b_allocd;
901 if (full)
902 atomic_add(b_allocd, &bh->b_count);
903 continue;
904 }
905 /* they are above the header */
906 if (b_data == bh->b_data + bh->b_size) {
907 bh->b_size += b_allocd;
908 if (full)
909 atomic_add(b_allocd, &bh->b_count);
910 continue;
911 }
912 prev_bh = bh;
913 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
914 if (!bh) {
915 free_pages((unsigned long) b_data, order);
916 goto abort;
917 }
918 bh->b_reqnext = NULL;
919 bh->b_data = b_data;
920 bh->b_size = b_allocd;
921 atomic_set(&bh->b_count, full ? bh->b_size : 0);
922 prev_bh->b_reqnext = bh;
923
924 pages &= (order-1);
925 }
926
927 bh->b_size -= tape->excess_bh_size;
928 if (full)
929 atomic_sub(tape->excess_bh_size, &bh->b_count);
930 return merge_bh;
931abort:
932 ide_tape_kfree_buffer(tape);
933 return NULL;
934}
935 723
936static int idetape_copy_stage_from_user(idetape_tape_t *tape, 724 return ide_tape_issue_pc(drive, &cmd, pc);
937 const char __user *buf, int n)
938{
939 struct idetape_bh *bh = tape->bh;
940 int count;
941 int ret = 0;
942
943 while (n) {
944 if (bh == NULL) {
945 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
946 __func__);
947 return 1;
948 }
949 count = min((unsigned int)
950 (bh->b_size - atomic_read(&bh->b_count)),
951 (unsigned int)n);
952 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
953 count))
954 ret = 1;
955 n -= count;
956 atomic_add(count, &bh->b_count);
957 buf += count;
958 if (atomic_read(&bh->b_count) == bh->b_size) {
959 bh = bh->b_reqnext;
960 if (bh)
961 atomic_set(&bh->b_count, 0);
962 }
963 }
964 tape->bh = bh;
965 return ret;
966}
967
968static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
969 int n)
970{
971 struct idetape_bh *bh = tape->bh;
972 int count;
973 int ret = 0;
974
975 while (n) {
976 if (bh == NULL) {
977 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
978 __func__);
979 return 1;
980 }
981 count = min(tape->b_count, n);
982 if (copy_to_user(buf, tape->b_data, count))
983 ret = 1;
984 n -= count;
985 tape->b_data += count;
986 tape->b_count -= count;
987 buf += count;
988 if (!tape->b_count) {
989 bh = bh->b_reqnext;
990 tape->bh = bh;
991 if (bh) {
992 tape->b_data = bh->b_data;
993 tape->b_count = atomic_read(&bh->b_count);
994 }
995 }
996 }
997 return ret;
998}
999
1000static void idetape_init_merge_buffer(idetape_tape_t *tape)
1001{
1002 struct idetape_bh *bh = tape->merge_bh;
1003 tape->bh = tape->merge_bh;
1004
1005 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1006 atomic_set(&bh->b_count, 0);
1007 else {
1008 tape->b_data = bh->b_data;
1009 tape->b_count = atomic_read(&bh->b_count);
1010 }
1011} 725}
1012 726
1013/* 727/*
@@ -1107,10 +821,10 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
1107 return; 821 return;
1108 822
1109 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags); 823 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
1110 tape->merge_bh_size = 0; 824 tape->valid = 0;
1111 if (tape->merge_bh != NULL) { 825 if (tape->buf != NULL) {
1112 ide_tape_kfree_buffer(tape); 826 kfree(tape->buf);
1113 tape->merge_bh = NULL; 827 tape->buf = NULL;
1114 } 828 }
1115 829
1116 tape->chrdev_dir = IDETAPE_DIR_NONE; 830 tape->chrdev_dir = IDETAPE_DIR_NONE;
@@ -1164,36 +878,43 @@ static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
1164 * Generate a read/write request for the block device interface and wait for it 878 * Generate a read/write request for the block device interface and wait for it
1165 * to be serviced. 879 * to be serviced.
1166 */ 880 */
1167static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, 881static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
1168 struct idetape_bh *bh)
1169{ 882{
1170 idetape_tape_t *tape = drive->driver_data; 883 idetape_tape_t *tape = drive->driver_data;
1171 struct request *rq; 884 struct request *rq;
1172 int ret, errors; 885 int ret;
1173 886
1174 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd); 887 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
888 BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
889 BUG_ON(size < 0 || size % tape->blk_size);
1175 890
1176 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 891 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1177 rq->cmd_type = REQ_TYPE_SPECIAL; 892 rq->cmd_type = REQ_TYPE_SPECIAL;
1178 rq->cmd[13] = cmd; 893 rq->cmd[13] = cmd;
1179 rq->rq_disk = tape->disk; 894 rq->rq_disk = tape->disk;
1180 rq->special = (void *)bh;
1181 rq->sector = tape->first_frame;
1182 rq->nr_sectors = blocks;
1183 rq->current_nr_sectors = blocks;
1184 blk_execute_rq(drive->queue, tape->disk, rq, 0);
1185 895
1186 errors = rq->errors; 896 if (size) {
1187 ret = tape->blk_size * (blocks - rq->current_nr_sectors); 897 ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
1188 blk_put_request(rq); 898 __GFP_WAIT);
899 if (ret)
900 goto out_put;
901 }
1189 902
1190 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0) 903 blk_execute_rq(drive->queue, tape->disk, rq, 0);
1191 return 0;
1192 904
1193 if (tape->merge_bh) 905 /* calculate the number of transferred bytes and update buffer state */
1194 idetape_init_merge_buffer(tape); 906 size -= rq->resid_len;
1195 if (errors == IDE_DRV_ERROR_GENERAL) 907 tape->cur = tape->buf;
1196 return -EIO; 908 if (cmd == REQ_IDETAPE_READ)
909 tape->valid = size;
910 else
911 tape->valid = 0;
912
913 ret = size;
914 if (rq->errors == IDE_DRV_ERROR_GENERAL)
915 ret = -EIO;
916out_put:
917 blk_put_request(rq);
1197 return ret; 918 return ret;
1198} 919}
1199 920
@@ -1230,153 +951,87 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
1230 pc->flags |= PC_FLAG_WAIT_FOR_DSC; 951 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1231} 952}
1232 953
1233/* Queue up a character device originated write request. */
1234static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
1235{
1236 idetape_tape_t *tape = drive->driver_data;
1237
1238 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
1239
1240 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
1241 blocks, tape->merge_bh);
1242}
1243
1244static void ide_tape_flush_merge_buffer(ide_drive_t *drive) 954static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
1245{ 955{
1246 idetape_tape_t *tape = drive->driver_data; 956 idetape_tape_t *tape = drive->driver_data;
1247 int blocks, min;
1248 struct idetape_bh *bh;
1249 957
1250 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 958 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
1251 printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer" 959 printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
1252 " but we are not writing.\n"); 960 " but we are not writing.\n");
1253 return; 961 return;
1254 } 962 }
1255 if (tape->merge_bh_size > tape->buffer_size) { 963 if (tape->buf) {
1256 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n"); 964 size_t aligned = roundup(tape->valid, tape->blk_size);
1257 tape->merge_bh_size = tape->buffer_size; 965
1258 } 966 memset(tape->cur, 0, aligned - tape->valid);
1259 if (tape->merge_bh_size) { 967 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, aligned);
1260 blocks = tape->merge_bh_size / tape->blk_size; 968 kfree(tape->buf);
1261 if (tape->merge_bh_size % tape->blk_size) { 969 tape->buf = NULL;
1262 unsigned int i;
1263
1264 blocks++;
1265 i = tape->blk_size - tape->merge_bh_size %
1266 tape->blk_size;
1267 bh = tape->bh->b_reqnext;
1268 while (bh) {
1269 atomic_set(&bh->b_count, 0);
1270 bh = bh->b_reqnext;
1271 }
1272 bh = tape->bh;
1273 while (i) {
1274 if (bh == NULL) {
1275 printk(KERN_INFO "ide-tape: bug,"
1276 " bh NULL\n");
1277 break;
1278 }
1279 min = min(i, (unsigned int)(bh->b_size -
1280 atomic_read(&bh->b_count)));
1281 memset(bh->b_data + atomic_read(&bh->b_count),
1282 0, min);
1283 atomic_add(min, &bh->b_count);
1284 i -= min;
1285 bh = bh->b_reqnext;
1286 }
1287 }
1288 (void) idetape_add_chrdev_write_request(drive, blocks);
1289 tape->merge_bh_size = 0;
1290 }
1291 if (tape->merge_bh != NULL) {
1292 ide_tape_kfree_buffer(tape);
1293 tape->merge_bh = NULL;
1294 } 970 }
1295 tape->chrdev_dir = IDETAPE_DIR_NONE; 971 tape->chrdev_dir = IDETAPE_DIR_NONE;
1296} 972}
1297 973
1298static int idetape_init_read(ide_drive_t *drive) 974static int idetape_init_rw(ide_drive_t *drive, int dir)
1299{ 975{
1300 idetape_tape_t *tape = drive->driver_data; 976 idetape_tape_t *tape = drive->driver_data;
1301 int bytes_read; 977 int rc;
1302 978
1303 /* Initialize read operation */ 979 BUG_ON(dir != IDETAPE_DIR_READ && dir != IDETAPE_DIR_WRITE);
1304 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1305 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
1306 ide_tape_flush_merge_buffer(drive);
1307 idetape_flush_tape_buffers(drive);
1308 }
1309 if (tape->merge_bh || tape->merge_bh_size) {
1310 printk(KERN_ERR "ide-tape: merge_bh_size should be"
1311 " 0 now\n");
1312 tape->merge_bh_size = 0;
1313 }
1314 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
1315 if (!tape->merge_bh)
1316 return -ENOMEM;
1317 tape->chrdev_dir = IDETAPE_DIR_READ;
1318 980
1319 /* 981 if (tape->chrdev_dir == dir)
1320 * Issue a read 0 command to ensure that DSC handshake is 982 return 0;
1321 * switched from completion mode to buffer available mode.
1322 * No point in issuing this if DSC overlap isn't supported, some
1323 * drives (Seagate STT3401A) will return an error.
1324 */
1325 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
1326 bytes_read = idetape_queue_rw_tail(drive,
1327 REQ_IDETAPE_READ, 0,
1328 tape->merge_bh);
1329 if (bytes_read < 0) {
1330 ide_tape_kfree_buffer(tape);
1331 tape->merge_bh = NULL;
1332 tape->chrdev_dir = IDETAPE_DIR_NONE;
1333 return bytes_read;
1334 }
1335 }
1336 }
1337 983
1338 return 0; 984 if (tape->chrdev_dir == IDETAPE_DIR_READ)
1339} 985 ide_tape_discard_merge_buffer(drive, 1);
986 else if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
987 ide_tape_flush_merge_buffer(drive);
988 idetape_flush_tape_buffers(drive);
989 }
1340 990
1341/* called from idetape_chrdev_read() to service a chrdev read request. */ 991 if (tape->buf || tape->valid) {
1342static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) 992 printk(KERN_ERR "ide-tape: valid should be 0 now\n");
1343{ 993 tape->valid = 0;
1344 idetape_tape_t *tape = drive->driver_data; 994 }
1345 995
1346 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); 996 tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
997 if (!tape->buf)
998 return -ENOMEM;
999 tape->chrdev_dir = dir;
1000 tape->cur = tape->buf;
1347 1001
1348 /* If we are at a filemark, return a read length of 0 */ 1002 /*
1349 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) 1003 * Issue a 0 rw command to ensure that DSC handshake is
1350 return 0; 1004 * switched from completion mode to buffer available mode. No
1351 1005 * point in issuing this if DSC overlap isn't supported, some
1352 idetape_init_read(drive); 1006 * drives (Seagate STT3401A) will return an error.
1007 */
1008 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
1009 int cmd = dir == IDETAPE_DIR_READ ? REQ_IDETAPE_READ
1010 : REQ_IDETAPE_WRITE;
1011
1012 rc = idetape_queue_rw_tail(drive, cmd, 0);
1013 if (rc < 0) {
1014 kfree(tape->buf);
1015 tape->buf = NULL;
1016 tape->chrdev_dir = IDETAPE_DIR_NONE;
1017 return rc;
1018 }
1019 }
1353 1020
1354 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks, 1021 return 0;
1355 tape->merge_bh);
1356} 1022}
1357 1023
1358static void idetape_pad_zeros(ide_drive_t *drive, int bcount) 1024static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
1359{ 1025{
1360 idetape_tape_t *tape = drive->driver_data; 1026 idetape_tape_t *tape = drive->driver_data;
1361 struct idetape_bh *bh; 1027
1362 int blocks; 1028 memset(tape->buf, 0, tape->buffer_size);
1363 1029
1364 while (bcount) { 1030 while (bcount) {
1365 unsigned int count; 1031 unsigned int count = min(tape->buffer_size, bcount);
1366 1032
1367 bh = tape->merge_bh; 1033 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, count);
1368 count = min(tape->buffer_size, bcount);
1369 bcount -= count; 1034 bcount -= count;
1370 blocks = count / tape->blk_size;
1371 while (count) {
1372 atomic_set(&bh->b_count,
1373 min(count, (unsigned int)bh->b_size));
1374 memset(bh->b_data, 0, atomic_read(&bh->b_count));
1375 count -= atomic_read(&bh->b_count);
1376 bh = bh->b_reqnext;
1377 }
1378 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
1379 tape->merge_bh);
1380 } 1035 }
1381} 1036}
1382 1037
@@ -1456,7 +1111,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
1456 } 1111 }
1457 1112
1458 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1113 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
1459 tape->merge_bh_size = 0; 1114 tape->valid = 0;
1460 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) 1115 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1461 ++count; 1116 ++count;
1462 ide_tape_discard_merge_buffer(drive, 0); 1117 ide_tape_discard_merge_buffer(drive, 0);
@@ -1505,9 +1160,9 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1505{ 1160{
1506 struct ide_tape_obj *tape = file->private_data; 1161 struct ide_tape_obj *tape = file->private_data;
1507 ide_drive_t *drive = tape->drive; 1162 ide_drive_t *drive = tape->drive;
1508 ssize_t bytes_read, temp, actually_read = 0, rc; 1163 size_t done = 0;
1509 ssize_t ret = 0; 1164 ssize_t ret = 0;
1510 u16 ctl = *(u16 *)&tape->caps[12]; 1165 int rc;
1511 1166
1512 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1167 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1513 1168
@@ -1517,49 +1172,43 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1517 (count % tape->blk_size) == 0) 1172 (count % tape->blk_size) == 0)
1518 tape->user_bs_factor = count / tape->blk_size; 1173 tape->user_bs_factor = count / tape->blk_size;
1519 } 1174 }
1520 rc = idetape_init_read(drive); 1175
1176 rc = idetape_init_rw(drive, IDETAPE_DIR_READ);
1521 if (rc < 0) 1177 if (rc < 0)
1522 return rc; 1178 return rc;
1523 if (count == 0) 1179
1524 return (0); 1180 while (done < count) {
1525 if (tape->merge_bh_size) { 1181 size_t todo;
1526 actually_read = min((unsigned int)(tape->merge_bh_size), 1182
1527 (unsigned int)count); 1183 /* refill if staging buffer is empty */
1528 if (idetape_copy_stage_to_user(tape, buf, actually_read)) 1184 if (!tape->valid) {
1529 ret = -EFAULT; 1185 /* If we are at a filemark, nothing more to read */
1530 buf += actually_read; 1186 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1531 tape->merge_bh_size -= actually_read; 1187 break;
1532 count -= actually_read; 1188 /* read */
1533 } 1189 if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
1534 while (count >= tape->buffer_size) { 1190 tape->buffer_size) <= 0)
1535 bytes_read = idetape_add_chrdev_read_request(drive, ctl); 1191 break;
1536 if (bytes_read <= 0) 1192 }
1537 goto finish; 1193
1538 if (idetape_copy_stage_to_user(tape, buf, bytes_read)) 1194 /* copy out */
1539 ret = -EFAULT; 1195 todo = min_t(size_t, count - done, tape->valid);
1540 buf += bytes_read; 1196 if (copy_to_user(buf + done, tape->cur, todo))
1541 count -= bytes_read;
1542 actually_read += bytes_read;
1543 }
1544 if (count) {
1545 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
1546 if (bytes_read <= 0)
1547 goto finish;
1548 temp = min((unsigned long)count, (unsigned long)bytes_read);
1549 if (idetape_copy_stage_to_user(tape, buf, temp))
1550 ret = -EFAULT; 1197 ret = -EFAULT;
1551 actually_read += temp; 1198
1552 tape->merge_bh_size = bytes_read-temp; 1199 tape->cur += todo;
1200 tape->valid -= todo;
1201 done += todo;
1553 } 1202 }
1554finish: 1203
1555 if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) { 1204 if (!done && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
1556 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); 1205 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
1557 1206
1558 idetape_space_over_filemarks(drive, MTFSF, 1); 1207 idetape_space_over_filemarks(drive, MTFSF, 1);
1559 return 0; 1208 return 0;
1560 } 1209 }
1561 1210
1562 return ret ? ret : actually_read; 1211 return ret ? ret : done;
1563} 1212}
1564 1213
1565static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf, 1214static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
@@ -1567,9 +1216,9 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
1567{ 1216{
1568 struct ide_tape_obj *tape = file->private_data; 1217 struct ide_tape_obj *tape = file->private_data;
1569 ide_drive_t *drive = tape->drive; 1218 ide_drive_t *drive = tape->drive;
1570 ssize_t actually_written = 0; 1219 size_t done = 0;
1571 ssize_t ret = 0; 1220 ssize_t ret = 0;
1572 u16 ctl = *(u16 *)&tape->caps[12]; 1221 int rc;
1573 1222
1574 /* The drive is write protected. */ 1223 /* The drive is write protected. */
1575 if (tape->write_prot) 1224 if (tape->write_prot)
@@ -1578,80 +1227,31 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
1578 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1227 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1579 1228
1580 /* Initialize write operation */ 1229 /* Initialize write operation */
1581 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 1230 rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE);
1582 if (tape->chrdev_dir == IDETAPE_DIR_READ) 1231 if (rc < 0)
1583 ide_tape_discard_merge_buffer(drive, 1); 1232 return rc;
1584 if (tape->merge_bh || tape->merge_bh_size) {
1585 printk(KERN_ERR "ide-tape: merge_bh_size "
1586 "should be 0 now\n");
1587 tape->merge_bh_size = 0;
1588 }
1589 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
1590 if (!tape->merge_bh)
1591 return -ENOMEM;
1592 tape->chrdev_dir = IDETAPE_DIR_WRITE;
1593 idetape_init_merge_buffer(tape);
1594 1233
1595 /* 1234 while (done < count) {
1596 * Issue a write 0 command to ensure that DSC handshake is 1235 size_t todo;
1597 * switched from completion mode to buffer available mode. No 1236
1598 * point in issuing this if DSC overlap isn't supported, some 1237 /* flush if staging buffer is full */
1599 * drives (Seagate STT3401A) will return an error. 1238 if (tape->valid == tape->buffer_size &&
1600 */ 1239 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
1601 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) { 1240 tape->buffer_size) <= 0)
1602 ssize_t retval = idetape_queue_rw_tail(drive, 1241 return rc;
1603 REQ_IDETAPE_WRITE, 0, 1242
1604 tape->merge_bh); 1243 /* copy in */
1605 if (retval < 0) { 1244 todo = min_t(size_t, count - done,
1606 ide_tape_kfree_buffer(tape); 1245 tape->buffer_size - tape->valid);
1607 tape->merge_bh = NULL; 1246 if (copy_from_user(tape->cur, buf + done, todo))
1608 tape->chrdev_dir = IDETAPE_DIR_NONE;
1609 return retval;
1610 }
1611 }
1612 }
1613 if (count == 0)
1614 return (0);
1615 if (tape->merge_bh_size) {
1616 if (tape->merge_bh_size >= tape->buffer_size) {
1617 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
1618 tape->merge_bh_size = 0;
1619 }
1620 actually_written = min((unsigned int)
1621 (tape->buffer_size - tape->merge_bh_size),
1622 (unsigned int)count);
1623 if (idetape_copy_stage_from_user(tape, buf, actually_written))
1624 ret = -EFAULT;
1625 buf += actually_written;
1626 tape->merge_bh_size += actually_written;
1627 count -= actually_written;
1628
1629 if (tape->merge_bh_size == tape->buffer_size) {
1630 ssize_t retval;
1631 tape->merge_bh_size = 0;
1632 retval = idetape_add_chrdev_write_request(drive, ctl);
1633 if (retval <= 0)
1634 return (retval);
1635 }
1636 }
1637 while (count >= tape->buffer_size) {
1638 ssize_t retval;
1639 if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
1640 ret = -EFAULT;
1641 buf += tape->buffer_size;
1642 count -= tape->buffer_size;
1643 retval = idetape_add_chrdev_write_request(drive, ctl);
1644 actually_written += tape->buffer_size;
1645 if (retval <= 0)
1646 return (retval);
1647 }
1648 if (count) {
1649 actually_written += count;
1650 if (idetape_copy_stage_from_user(tape, buf, count))
1651 ret = -EFAULT; 1247 ret = -EFAULT;
1652 tape->merge_bh_size += count; 1248
1249 tape->cur += todo;
1250 tape->valid += todo;
1251 done += todo;
1653 } 1252 }
1654 return ret ? ret : actually_written; 1253
1254 return ret ? ret : done;
1655} 1255}
1656 1256
1657static int idetape_write_filemark(ide_drive_t *drive) 1257static int idetape_write_filemark(ide_drive_t *drive)
@@ -1812,7 +1412,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
1812 idetape_flush_tape_buffers(drive); 1412 idetape_flush_tape_buffers(drive);
1813 } 1413 }
1814 if (cmd == MTIOCGET || cmd == MTIOCPOS) { 1414 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
1815 block_offset = tape->merge_bh_size / 1415 block_offset = tape->valid /
1816 (tape->blk_size * tape->user_bs_factor); 1416 (tape->blk_size * tape->user_bs_factor);
1817 position = idetape_read_position(drive); 1417 position = idetape_read_position(drive);
1818 if (position < 0) 1418 if (position < 0)
@@ -1960,12 +1560,12 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
1960 idetape_tape_t *tape = drive->driver_data; 1560 idetape_tape_t *tape = drive->driver_data;
1961 1561
1962 ide_tape_flush_merge_buffer(drive); 1562 ide_tape_flush_merge_buffer(drive);
1963 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0); 1563 tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
1964 if (tape->merge_bh != NULL) { 1564 if (tape->buf != NULL) {
1965 idetape_pad_zeros(drive, tape->blk_size * 1565 idetape_pad_zeros(drive, tape->blk_size *
1966 (tape->user_bs_factor - 1)); 1566 (tape->user_bs_factor - 1));
1967 ide_tape_kfree_buffer(tape); 1567 kfree(tape->buf);
1968 tape->merge_bh = NULL; 1568 tape->buf = NULL;
1969 } 1569 }
1970 idetape_write_filemark(drive); 1570 idetape_write_filemark(drive);
1971 idetape_flush_tape_buffers(drive); 1571 idetape_flush_tape_buffers(drive);
@@ -2159,8 +1759,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2159 u16 *ctl = (u16 *)&tape->caps[12]; 1759 u16 *ctl = (u16 *)&tape->caps[12];
2160 1760
2161 drive->pc_callback = ide_tape_callback; 1761 drive->pc_callback = ide_tape_callback;
2162 drive->pc_update_buffers = idetape_update_buffers;
2163 drive->pc_io_buffers = ide_tape_io_buffers;
2164 1762
2165 drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP; 1763 drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
2166 1764
@@ -2191,11 +1789,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2191 tape->buffer_size = *ctl * tape->blk_size; 1789 tape->buffer_size = *ctl * tape->blk_size;
2192 } 1790 }
2193 buffer_size = tape->buffer_size; 1791 buffer_size = tape->buffer_size;
2194 tape->pages_per_buffer = buffer_size / PAGE_SIZE;
2195 if (buffer_size % PAGE_SIZE) {
2196 tape->pages_per_buffer++;
2197 tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
2198 }
2199 1792
2200 /* select the "best" DSC read/write polling freq */ 1793 /* select the "best" DSC read/write polling freq */
2201 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]); 1794 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
@@ -2238,7 +1831,7 @@ static void ide_tape_release(struct device *dev)
2238 ide_drive_t *drive = tape->drive; 1831 ide_drive_t *drive = tape->drive;
2239 struct gendisk *g = tape->disk; 1832 struct gendisk *g = tape->disk;
2240 1833
2241 BUG_ON(tape->merge_bh_size); 1834 BUG_ON(tape->valid);
2242 1835
2243 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP; 1836 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
2244 drive->driver_data = NULL; 1837 drive->driver_data = NULL;
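Seen end to end, the ide-tape character-device paths now run against one linear staging buffer: tape->buf holds buffer_size bytes, tape->cur is the read/write cursor, and tape->valid counts the bytes not yet consumed. Ignoring filemark handling and the driver plumbing, the read loop has this shape (a generic sketch; staging_read() and the refill callback are invented names):

static ssize_t staging_read(char __user *ubuf, size_t count,
			    void *buf, size_t bufsize,
			    int (*refill)(void *buf, size_t size))
{
	char *cur = buf;
	size_t valid = 0, done = 0;
	ssize_t ret = 0;

	while (done < count) {
		size_t todo;

		if (!valid) {				/* staging buffer drained */
			int n = refill(buf, bufsize);	/* e.g. a REQ_IDETAPE_READ */
			if (n <= 0)
				break;
			cur = buf;
			valid = n;
		}

		todo = min_t(size_t, count - done, valid);
		if (copy_to_user(ubuf + done, cur, todo))
			ret = -EFAULT;

		cur += todo;
		valid -= todo;
		done += todo;
	}

	return ret ? ret : done;
}

The write side is the mirror image: copy_from_user() into the cursor, then flush with REQ_IDETAPE_WRITE once valid reaches buffer_size.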
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4aa6223c11b..a0c3e1b2f73 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -385,7 +385,7 @@ out_end:
385 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) 385 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
386 ide_finish_cmd(drive, cmd, stat); 386 ide_finish_cmd(drive, cmd, stat);
387 else 387 else
388 ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9); 388 ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
389 return ide_stopped; 389 return ide_stopped;
390out_err: 390out_err:
391 ide_error_cmd(drive, cmd); 391 ide_error_cmd(drive, cmd);
@@ -424,7 +424,9 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
424 424
425 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 425 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
426 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 426 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
427 rq->buffer = buf; 427
428 if (cmd->tf_flags & IDE_TFLAG_WRITE)
429 rq->cmd_flags |= REQ_RW;
428 430
429 /* 431 /*
430 * (ks) We transfer currently only whole sectors. 432 * (ks) We transfer currently only whole sectors.
@@ -432,18 +434,20 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
432 * if we would find a solution to transfer any size. 434 * if we would find a solution to transfer any size.
433 * To support special commands like READ LONG. 435 * To support special commands like READ LONG.
434 */ 436 */
435 rq->hard_nr_sectors = rq->nr_sectors = nsect; 437 if (nsect) {
436 rq->hard_cur_sectors = rq->current_nr_sectors = nsect; 438 error = blk_rq_map_kern(drive->queue, rq, buf,
437 439 nsect * SECTOR_SIZE, __GFP_WAIT);
438 if (cmd->tf_flags & IDE_TFLAG_WRITE) 440 if (error)
439 rq->cmd_flags |= REQ_RW; 441 goto put_req;
442 }
440 443
441 rq->special = cmd; 444 rq->special = cmd;
442 cmd->rq = rq; 445 cmd->rq = rq;
443 446
444 error = blk_execute_rq(drive->queue, NULL, rq, 0); 447 error = blk_execute_rq(drive->queue, NULL, rq, 0);
445 blk_put_request(rq);
446 448
449put_req:
450 blk_put_request(rq);
447 return error; 451 return error;
448} 452}
449 453
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 248a54bd238..c2a16a8f486 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -177,7 +177,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
177 u8 clock = inb(high_16 + 0x11); 177 u8 clock = inb(high_16 + 0x11);
178 178
179 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11); 179 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
180 word_count = (rq->nr_sectors << 8); 180 word_count = (blk_rq_sectors(rq) << 8);
181 word_count = (rq_data_dir(rq) == READ) ? 181 word_count = (rq_data_dir(rq) == READ) ?
182 word_count | 0x05000000 : 182 word_count | 0x05000000 :
183 word_count | 0x06000000; 183 word_count | 0x06000000;
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index b4cf42dc8a6..05a93d6baec 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
112 ide_hwif_t *hwif = drive->hwif; 112 ide_hwif_t *hwif = drive->hwif;
113 unsigned long sc_base = hwif->config_data; 113 unsigned long sc_base = hwif->config_data;
114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); 114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
115 unsigned long nsectors = hwif->rq->nr_sectors; 115 unsigned long nsectors = blk_rq_sectors(hwif->rq);
116 116
117 /* 117 /*
118 * We have to manually load the sector count and size into 118 * We have to manually load the sector count and size into
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 564422d2397..5ca76224f6d 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ? 307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1); 308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
309 309
310 tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt); 310 tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
311 311
312 return 0; 312 return 0;
313} 313}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index de143deb06f..c0bebc6a2f2 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -672,15 +672,14 @@ try_again:
672 msb->req_sg); 672 msb->req_sg);
673 673
674 if (!msb->seg_count) { 674 if (!msb->seg_count) {
675 chunk = __blk_end_request(msb->block_req, -ENOMEM, 675 chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
676 blk_rq_cur_bytes(msb->block_req));
677 continue; 676 continue;
678 } 677 }
679 678
680 t_sec = msb->block_req->sector << 9; 679 t_sec = blk_rq_pos(msb->block_req) << 9;
681 sector_div(t_sec, msb->page_size); 680 sector_div(t_sec, msb->page_size);
682 681
683 count = msb->block_req->nr_sectors << 9; 682 count = blk_rq_bytes(msb->block_req);
684 count /= msb->page_size; 683 count /= msb->page_size;
685 684
686 param.system = msb->system; 685 param.system = msb->system;
@@ -705,8 +704,8 @@ try_again:
705 return 0; 704 return 0;
706 } 705 }
707 706
708 dev_dbg(&card->dev, "elv_next\n"); 707 dev_dbg(&card->dev, "blk_fetch\n");
709 msb->block_req = elv_next_request(msb->queue); 708 msb->block_req = blk_fetch_request(msb->queue);
710 if (!msb->block_req) { 709 if (!msb->block_req) {
711 dev_dbg(&card->dev, "issue end\n"); 710 dev_dbg(&card->dev, "issue end\n");
712 return -EAGAIN; 711 return -EAGAIN;
@@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
745 t_len *= msb->page_size; 744 t_len *= msb->page_size;
746 } 745 }
747 } else 746 } else
748 t_len = msb->block_req->nr_sectors << 9; 747 t_len = blk_rq_bytes(msb->block_req);
749 748
750 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); 749 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
751 750
@@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
825 return; 824 return;
826 825
827 if (msb->eject) { 826 if (msb->eject) {
828 while ((req = elv_next_request(q)) != NULL) 827 while ((req = blk_fetch_request(q)) != NULL)
829 __blk_end_request(req, -ENODEV, blk_rq_bytes(req)); 828 __blk_end_request_all(req, -ENODEV);
830 829
831 return; 830 return;
832 } 831 }
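mspro_block shows the queue-consumer side of the API change: requests are obtained with blk_fetch_request() (peek plus dequeue) and finished with the __blk_end_request_*() helpers rather than hand-computed byte counts. The canonical loop for a request_fn that completes one chunk at a time looks roughly like this (do_transfer() is a stand-in for the driver's actual I/O; the __ variants are used because request_fn runs with the queue lock held):

static void example_request_fn(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req) {
		/* do_transfer() is a hypothetical per-chunk I/O helper */
		int err = do_transfer(req, blk_rq_pos(req),
				      blk_rq_cur_sectors(req)) ? 0 : -EIO;

		/* finish the current chunk; only fetch the next request
		 * once this one reports it is fully completed */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}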
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b9..79f5433359f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1277,8 +1277,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1277 /* do we need to support multiple segments? */ 1277 /* do we need to support multiple segments? */
1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
1280 ioc->name, __func__, req->bio->bi_vcnt, req->data_len, 1280 ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1281 rsp->bio->bi_vcnt, rsp->data_len); 1281 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1282 return -EINVAL; 1282 return -EINVAL;
1283 } 1283 }
1284 1284
@@ -1295,7 +1295,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1295 smpreq = (SmpPassthroughRequest_t *)mf; 1295 smpreq = (SmpPassthroughRequest_t *)mf;
1296 memset(smpreq, 0, sizeof(*smpreq)); 1296 memset(smpreq, 0, sizeof(*smpreq));
1297 1297
1298 smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4); 1298 smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; 1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
1300 1300
1301 if (rphy) 1301 if (rphy)
@@ -1321,10 +1321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1321 MPI_SGE_FLAGS_END_OF_BUFFER | 1321 MPI_SGE_FLAGS_END_OF_BUFFER |
1322 MPI_SGE_FLAGS_DIRECTION | 1322 MPI_SGE_FLAGS_DIRECTION |
1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
1324 flagsLength |= (req->data_len - 4); 1324 flagsLength |= (blk_rq_bytes(req) - 4);
1325 1325
1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
1327 req->data_len, PCI_DMA_BIDIRECTIONAL); 1327 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1328 if (!dma_addr_out) 1328 if (!dma_addr_out)
1329 goto put_mf; 1329 goto put_mf;
1330 mpt_add_sge(psge, flagsLength, dma_addr_out); 1330 mpt_add_sge(psge, flagsLength, dma_addr_out);
@@ -1332,9 +1332,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1332 1332
1333 /* response */ 1333 /* response */
1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
1335 flagsLength |= rsp->data_len + 4; 1335 flagsLength |= blk_rq_bytes(rsp) + 4;
1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
1337 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1337 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1338 if (!dma_addr_in) 1338 if (!dma_addr_in)
1339 goto unmap; 1339 goto unmap;
1340 mpt_add_sge(psge, flagsLength, dma_addr_in); 1340 mpt_add_sge(psge, flagsLength, dma_addr_in);
@@ -1357,8 +1357,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
1358 memcpy(req->sense, smprep, sizeof(*smprep)); 1358 memcpy(req->sense, smprep, sizeof(*smprep));
1359 req->sense_len = sizeof(*smprep); 1359 req->sense_len = sizeof(*smprep);
1360 req->data_len = 0; 1360 req->resid_len = 0;
1361 rsp->data_len -= smprep->ResponseDataLength; 1361 rsp->resid_len -= smprep->ResponseDataLength;
1362 } else { 1362 } else {
1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
1364 ioc->name, __func__); 1364 ioc->name, __func__);
@@ -1366,10 +1366,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1366 } 1366 }
1367unmap: 1367unmap:
1368 if (dma_addr_out) 1368 if (dma_addr_out)
1369 pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len, 1369 pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
1370 PCI_DMA_BIDIRECTIONAL); 1370 PCI_DMA_BIDIRECTIONAL);
1371 if (dma_addr_in) 1371 if (dma_addr_in)
1372 pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len, 1372 pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
1373 PCI_DMA_BIDIRECTIONAL); 1373 PCI_DMA_BIDIRECTIONAL);
1374put_mf: 1374put_mf:
1375 if (mf) 1375 if (mf)
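For the SAS SMP passthrough paths, the old habit of shrinking req->data_len/rsp->data_len to report leftovers is replaced by the split used throughout the series: blk_rq_bytes() gives the mapped size, rq->resid_len carries whatever was not transferred. In outline (reply_data_len is an illustrative value taken from the firmware reply, not the driver's field name):

/* completion bookkeeping under the new convention */
req->resid_len = 0;				/* request fully consumed */
rsp->resid_len = blk_rq_bytes(rsp) - reply_data_len;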
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc4..6573ef4408f 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
426 struct request_queue *q = req->q; 426 struct request_queue *q = req->q;
427 unsigned long flags; 427 unsigned long flags;
428 428
429 if (blk_end_request(req, error, nr_bytes)) { 429 if (blk_end_request(req, error, nr_bytes))
430 int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
431
432 if (blk_pc_request(req))
433 leftover = req->data_len;
434
435 if (error) 430 if (error)
436 blk_end_request(req, -EIO, leftover); 431 blk_end_request_all(req, -EIO);
437 }
438 432
439 spin_lock_irqsave(q->queue_lock, flags); 433 spin_lock_irqsave(q->queue_lock, flags);
440 434
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
761 break; 755 break;
762 756
763 case CACHE_SMARTFETCH: 757 case CACHE_SMARTFETCH:
764 if (req->nr_sectors > 16) 758 if (blk_rq_sectors(req) > 16)
765 ctl_flags = 0x201F0008; 759 ctl_flags = 0x201F0008;
766 else 760 else
767 ctl_flags = 0x001F0000; 761 ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
781 ctl_flags = 0x001F0010; 775 ctl_flags = 0x001F0010;
782 break; 776 break;
783 case CACHE_SMARTBACK: 777 case CACHE_SMARTBACK:
784 if (req->nr_sectors > 16) 778 if (blk_rq_sectors(req) > 16)
785 ctl_flags = 0x001F0004; 779 ctl_flags = 0x001F0004;
786 else 780 else
787 ctl_flags = 0x001F0010; 781 ctl_flags = 0x001F0010;
788 break; 782 break;
789 case CACHE_SMARTTHROUGH: 783 case CACHE_SMARTTHROUGH:
790 if (req->nr_sectors > 16) 784 if (blk_rq_sectors(req) > 16)
791 ctl_flags = 0x001F0004; 785 ctl_flags = 0x001F0004;
792 else 786 else
793 ctl_flags = 0x001F0010; 787 ctl_flags = 0x001F0010;
@@ -827,22 +821,22 @@ static int i2o_block_transfer(struct request *req)
827 821
828 *mptr++ = cpu_to_le32(scsi_flags); 822 *mptr++ = cpu_to_le32(scsi_flags);
829 823
830 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 824 *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
831 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 825 *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
832 826
833 memcpy(mptr, cmd, 10); 827 memcpy(mptr, cmd, 10);
834 mptr += 4; 828 mptr += 4;
835 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 829 *mptr++ = cpu_to_le32(blk_rq_bytes(req));
836 } else 830 } else
837#endif 831#endif
838 { 832 {
839 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); 833 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
840 *mptr++ = cpu_to_le32(ctl_flags); 834 *mptr++ = cpu_to_le32(ctl_flags);
841 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 835 *mptr++ = cpu_to_le32(blk_rq_bytes(req));
842 *mptr++ = 836 *mptr++ =
843 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); 837 cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
844 *mptr++ = 838 *mptr++ =
845 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); 839 cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
846 } 840 }
847 841
848 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 842 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +877,7 @@ static void i2o_block_request_fn(struct request_queue *q)
883 struct request *req; 877 struct request *req;
884 878
885 while (!blk_queue_plugged(q)) { 879 while (!blk_queue_plugged(q)) {
886 req = elv_next_request(q); 880 req = blk_peek_request(q);
887 if (!req) 881 if (!req)
888 break; 882 break;
889 883
@@ -896,7 +890,7 @@ static void i2o_block_request_fn(struct request_queue *q)
896 890
897 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { 891 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
898 if (!i2o_block_transfer(req)) { 892 if (!i2o_block_transfer(req)) {
899 blkdev_dequeue_request(req); 893 blk_start_request(req);
900 continue; 894 continue;
901 } else 895 } else
902 osm_info("transfer error\n"); 896 osm_info("transfer error\n");
@@ -922,8 +916,10 @@ static void i2o_block_request_fn(struct request_queue *q)
922 blk_stop_queue(q); 916 blk_stop_queue(q);
923 break; 917 break;
924 } 918 }
925 } else 919 } else {
926 end_request(req, 0); 920 blk_start_request(req);
921 __blk_end_request_all(req, -EIO);
922 }
927 } 923 }
928}; 924};
929 925
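
The i2o request_fn hunks show the new dispatch discipline: blk_peek_request() looks at the queue head without dequeueing, blk_start_request() dequeues only once the command is accepted, and rejected requests are failed whole with __blk_end_request_all(). A sketch under assumed names (example_request_fn() and example_issue() are illustrative stubs, not driver code):

#include <linux/blkdev.h>

static int example_issue(struct request *req)
{
        return 0;       /* stub: pretend the hardware accepted the request */
}

static void example_request_fn(struct request_queue *q)
{
        struct request *req;

        while ((req = blk_peek_request(q)) != NULL) {
                if (!blk_fs_request(req)) {
                        blk_start_request(req);           /* dequeue ...     */
                        __blk_end_request_all(req, -EIO); /* ... and fail it */
                        continue;
                }
                if (example_issue(req))
                        break;                  /* leave it queued, retry later */
                blk_start_request(req);         /* accepted: take it off the queue */
        }
}
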
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b25e9b6516a..c5df8654645 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -243,7 +243,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
243 brq.mrq.cmd = &brq.cmd; 243 brq.mrq.cmd = &brq.cmd;
244 brq.mrq.data = &brq.data; 244 brq.mrq.data = &brq.data;
245 245
246 brq.cmd.arg = req->sector; 246 brq.cmd.arg = blk_rq_pos(req);
247 if (!mmc_card_blockaddr(card)) 247 if (!mmc_card_blockaddr(card))
248 brq.cmd.arg <<= 9; 248 brq.cmd.arg <<= 9;
249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
251 brq.stop.opcode = MMC_STOP_TRANSMISSION; 251 brq.stop.opcode = MMC_STOP_TRANSMISSION;
252 brq.stop.arg = 0; 252 brq.stop.arg = 0;
253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
254 brq.data.blocks = req->nr_sectors; 254 brq.data.blocks = blk_rq_sectors(req);
255 255
256 /* 256 /*
257 * The block layer doesn't support all sector count 257 * The block layer doesn't support all sector count
@@ -301,7 +301,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
301 * Adjust the sg list so it is the same size as the 301 * Adjust the sg list so it is the same size as the
302 * request. 302 * request.
303 */ 303 */
304 if (brq.data.blocks != req->nr_sectors) { 304 if (brq.data.blocks != blk_rq_sectors(req)) {
305 int i, data_size = brq.data.blocks << 9; 305 int i, data_size = brq.data.blocks << 9;
306 struct scatterlist *sg; 306 struct scatterlist *sg;
307 307
@@ -352,8 +352,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
352 printk(KERN_ERR "%s: error %d transferring data," 352 printk(KERN_ERR "%s: error %d transferring data,"
353 " sector %u, nr %u, card status %#x\n", 353 " sector %u, nr %u, card status %#x\n",
354 req->rq_disk->disk_name, brq.data.error, 354 req->rq_disk->disk_name, brq.data.error,
355 (unsigned)req->sector, 355 (unsigned)blk_rq_pos(req),
356 (unsigned)req->nr_sectors, status); 356 (unsigned)blk_rq_sectors(req), status);
357 } 357 }
358 358
359 if (brq.stop.error) { 359 if (brq.stop.error) {
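
The MMC hunks are a pure accessor swap: start sector and length now come from blk_rq_pos() and blk_rq_sectors() rather than the removed ->sector and ->nr_sectors fields. A small sketch, with fill_mmc_args() as an assumed helper name:

#include <linux/blkdev.h>

static void fill_mmc_args(struct request *req, u32 *arg, u32 *blocks,
                          bool block_addressed)
{
        *arg = blk_rq_pos(req);         /* start sector, 512-byte units   */
        if (!block_addressed)
                *arg <<= 9;             /* byte addressing on older cards */
        *blocks = blk_rq_sectors(req);  /* sectors covered by the request */
}
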
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c6..49e582356c6 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
55 spin_lock_irq(q->queue_lock); 55 spin_lock_irq(q->queue_lock);
56 set_current_state(TASK_INTERRUPTIBLE); 56 set_current_state(TASK_INTERRUPTIBLE);
57 if (!blk_queue_plugged(q)) 57 if (!blk_queue_plugged(q))
58 req = elv_next_request(q); 58 req = blk_fetch_request(q);
59 mq->req = req; 59 mq->req = req;
60 spin_unlock_irq(q->queue_lock); 60 spin_unlock_irq(q->queue_lock);
61 61
@@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
88{ 88{
89 struct mmc_queue *mq = q->queuedata; 89 struct mmc_queue *mq = q->queuedata;
90 struct request *req; 90 struct request *req;
91 int ret;
92 91
93 if (!mq) { 92 if (!mq) {
94 printk(KERN_ERR "MMC: killing requests for dead queue\n"); 93 printk(KERN_ERR "MMC: killing requests for dead queue\n");
95 while ((req = elv_next_request(q)) != NULL) { 94 while ((req = blk_fetch_request(q)) != NULL)
96 do { 95 __blk_end_request_all(req, -EIO);
97 ret = __blk_end_request(req, -EIO,
98 blk_rq_cur_bytes(req));
99 } while (ret);
100 }
101 return; 96 return;
102 } 97 }
103 98
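
blk_fetch_request() used in the queue-thread hunk is the peek-plus-start combination, which also makes the dead-queue drain a one-liner per request. A sketch, assuming the caller holds the queue lock:

#include <linux/blkdev.h>

static void drain_dead_queue(struct request_queue *q)
{
        struct request *req;

        /* fetch = peek + start_request; q->queue_lock held by the caller */
        while ((req = blk_fetch_request(q)) != NULL)
                __blk_end_request_all(req, -EIO);
}
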
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb..502622f628b 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
47 unsigned long block, nsect; 47 unsigned long block, nsect;
48 char *buf; 48 char *buf;
49 49
50 block = req->sector << 9 >> tr->blkshift; 50 block = blk_rq_pos(req) << 9 >> tr->blkshift;
51 nsect = req->current_nr_sectors << 9 >> tr->blkshift; 51 nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
52 52
53 buf = req->buffer; 53 buf = req->buffer;
54 54
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD) 56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return !tr->discard(dev, block, nsect); 57 return tr->discard(dev, block, nsect);
58 58
59 if (!blk_fs_request(req)) 59 if (!blk_fs_request(req))
60 return 0; 60 return -EIO;
61 61
62 if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) 62 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
63 return 0; 63 get_capacity(req->rq_disk))
64 return -EIO;
64 65
65 switch(rq_data_dir(req)) { 66 switch(rq_data_dir(req)) {
66 case READ: 67 case READ:
67 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 68 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
68 if (tr->readsect(dev, block, buf)) 69 if (tr->readsect(dev, block, buf))
69 return 0; 70 return -EIO;
70 return 1; 71 return 0;
71 72
72 case WRITE: 73 case WRITE:
73 if (!tr->writesect) 74 if (!tr->writesect)
74 return 0; 75 return -EIO;
75 76
76 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 77 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
77 if (tr->writesect(dev, block, buf)) 78 if (tr->writesect(dev, block, buf))
78 return 0; 79 return -EIO;
79 return 1; 80 return 0;
80 81
81 default: 82 default:
82 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); 83 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
83 return 0; 84 return -EIO;
84 } 85 }
85} 86}
86 87
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
88{ 89{
89 struct mtd_blktrans_ops *tr = arg; 90 struct mtd_blktrans_ops *tr = arg;
90 struct request_queue *rq = tr->blkcore_priv->rq; 91 struct request_queue *rq = tr->blkcore_priv->rq;
92 struct request *req = NULL;
91 93
92 /* we might get involved when memory gets low, so use PF_MEMALLOC */ 94 /* we might get involved when memory gets low, so use PF_MEMALLOC */
93 current->flags |= PF_MEMALLOC; 95 current->flags |= PF_MEMALLOC;
94 96
95 spin_lock_irq(rq->queue_lock); 97 spin_lock_irq(rq->queue_lock);
98
96 while (!kthread_should_stop()) { 99 while (!kthread_should_stop()) {
97 struct request *req;
98 struct mtd_blktrans_dev *dev; 100 struct mtd_blktrans_dev *dev;
99 int res = 0; 101 int res;
100
101 req = elv_next_request(rq);
102 102
103 if (!req) { 103 if (!req && !(req = blk_fetch_request(rq))) {
104 set_current_state(TASK_INTERRUPTIBLE); 104 set_current_state(TASK_INTERRUPTIBLE);
105 spin_unlock_irq(rq->queue_lock); 105 spin_unlock_irq(rq->queue_lock);
106 schedule(); 106 schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
119 119
120 spin_lock_irq(rq->queue_lock); 120 spin_lock_irq(rq->queue_lock);
121 121
122 end_request(req, res); 122 if (!__blk_end_request_cur(req, res))
123 req = NULL;
123 } 124 }
125
126 if (req)
127 __blk_end_request_all(req, -EIO);
128
124 spin_unlock_irq(rq->queue_lock); 129 spin_unlock_irq(rq->queue_lock);
125 130
126 return 0; 131 return 0;
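
The mtd worker keeps a request across iterations because __blk_end_request_cur() completes only the current chunk and returns true while bytes remain. A sketch of that idiom with an assumed do_chunk() callback; the queue lock is assumed held, as in the driver:

#include <linux/blkdev.h>

static void service_requests(struct request_queue *q,
                             int (*do_chunk)(struct request *req))
{
        struct request *req = blk_fetch_request(q);   /* q->queue_lock held */

        while (req) {
                int res = do_chunk(req);    /* 0 on success, -EIO on failure */

                /* finishes blk_rq_cur_bytes(); false once the request is done */
                if (!__blk_end_request_cur(req, res))
                        req = blk_fetch_request(q);
        }
}
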
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d1815272c43..e64f62d5e0f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
603 if (dasd_profile_level != DASD_PROFILE_ON) 603 if (dasd_profile_level != DASD_PROFILE_ON)
604 return; 604 return;
605 605
606 sectors = req->nr_sectors; 606 sectors = blk_rq_sectors(req);
607 if (!cqr->buildclk || !cqr->startclk || 607 if (!cqr->buildclk || !cqr->startclk ||
608 !cqr->stopclk || !cqr->endclk || 608 !cqr->stopclk || !cqr->endclk ||
609 !sectors) 609 !sectors)
@@ -1614,15 +1614,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
1614} 1614}
1615 1615
1616/* 1616/*
1617 * posts the buffer_cache about a finalized request
1618 */
1619static inline void dasd_end_request(struct request *req, int error)
1620{
1621 if (__blk_end_request(req, error, blk_rq_bytes(req)))
1622 BUG();
1623}
1624
1625/*
1626 * Process finished error recovery ccw. 1617 * Process finished error recovery ccw.
1627 */ 1618 */
1628static inline void __dasd_block_process_erp(struct dasd_block *block, 1619static inline void __dasd_block_process_erp(struct dasd_block *block,
@@ -1665,18 +1656,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1665 if (basedev->state < DASD_STATE_READY) 1656 if (basedev->state < DASD_STATE_READY)
1666 return; 1657 return;
1667 /* Now we try to fetch requests from the request queue */ 1658 /* Now we try to fetch requests from the request queue */
1668 while (!blk_queue_plugged(queue) && 1659 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1669 elv_next_request(queue)) {
1670
1671 req = elv_next_request(queue);
1672
1673 if (basedev->features & DASD_FEATURE_READONLY && 1660 if (basedev->features & DASD_FEATURE_READONLY &&
1674 rq_data_dir(req) == WRITE) { 1661 rq_data_dir(req) == WRITE) {
1675 DBF_DEV_EVENT(DBF_ERR, basedev, 1662 DBF_DEV_EVENT(DBF_ERR, basedev,
1676 "Rejecting write request %p", 1663 "Rejecting write request %p",
1677 req); 1664 req);
1678 blkdev_dequeue_request(req); 1665 blk_start_request(req);
1679 dasd_end_request(req, -EIO); 1666 __blk_end_request_all(req, -EIO);
1680 continue; 1667 continue;
1681 } 1668 }
1682 cqr = basedev->discipline->build_cp(basedev, block, req); 1669 cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1704,8 +1691,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1704 "CCW creation failed (rc=%ld) " 1691 "CCW creation failed (rc=%ld) "
1705 "on request %p", 1692 "on request %p",
1706 PTR_ERR(cqr), req); 1693 PTR_ERR(cqr), req);
1707 blkdev_dequeue_request(req); 1694 blk_start_request(req);
1708 dasd_end_request(req, -EIO); 1695 __blk_end_request_all(req, -EIO);
1709 continue; 1696 continue;
1710 } 1697 }
1711 /* 1698 /*
@@ -1714,7 +1701,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1714 */ 1701 */
1715 cqr->callback_data = (void *) req; 1702 cqr->callback_data = (void *) req;
1716 cqr->status = DASD_CQR_FILLED; 1703 cqr->status = DASD_CQR_FILLED;
1717 blkdev_dequeue_request(req); 1704 blk_start_request(req);
1718 list_add_tail(&cqr->blocklist, &block->ccw_queue); 1705 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1719 dasd_profile_start(block, cqr, req); 1706 dasd_profile_start(block, cqr, req);
1720 } 1707 }
@@ -1731,7 +1718,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1731 status = cqr->block->base->discipline->free_cp(cqr, req); 1718 status = cqr->block->base->discipline->free_cp(cqr, req);
1732 if (status <= 0) 1719 if (status <= 0)
1733 error = status ? status : -EIO; 1720 error = status ? status : -EIO;
1734 dasd_end_request(req, error); 1721 __blk_end_request_all(req, error);
1735} 1722}
1736 1723
1737/* 1724/*
@@ -2038,10 +2025,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2038 return; 2025 return;
2039 2026
2040 spin_lock_irq(&block->request_queue_lock); 2027 spin_lock_irq(&block->request_queue_lock);
2041 while ((req = elv_next_request(block->request_queue))) { 2028 while ((req = blk_fetch_request(block->request_queue)))
2042 blkdev_dequeue_request(req); 2029 __blk_end_request_all(req, -EIO);
2043 dasd_end_request(req, -EIO);
2044 }
2045 spin_unlock_irq(&block->request_queue_lock); 2030 spin_unlock_irq(&block->request_queue_lock);
2046} 2031}
2047 2032
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index b9a7f773344..2efaddfae56 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -505,8 +505,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
505 return ERR_PTR(-EINVAL); 505 return ERR_PTR(-EINVAL);
506 blksize = block->bp_block; 506 blksize = block->bp_block;
507 /* Calculate record id of first and last block. */ 507 /* Calculate record id of first and last block. */
508 first_rec = req->sector >> block->s2b_shift; 508 first_rec = blk_rq_pos(req) >> block->s2b_shift;
509 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 509 last_rec =
510 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
510 /* Check struct bio and count the number of blocks for the request. */ 511 /* Check struct bio and count the number of blocks for the request. */
511 count = 0; 512 count = 0;
512 rq_for_each_segment(bv, req, iter) { 513 rq_for_each_segment(bv, req, iter) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb52da033f0..a41c94053e6 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2354,10 +2354,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2354 blksize = block->bp_block; 2354 blksize = block->bp_block;
2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2356 /* Calculate record id of first and last block. */ 2356 /* Calculate record id of first and last block. */
2357 first_rec = first_trk = req->sector >> block->s2b_shift; 2357 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2358 first_offs = sector_div(first_trk, blk_per_trk); 2358 first_offs = sector_div(first_trk, blk_per_trk);
2359 last_rec = last_trk = 2359 last_rec = last_trk =
2360 (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 2360 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2361 last_offs = sector_div(last_trk, blk_per_trk); 2361 last_offs = sector_div(last_trk, blk_per_trk);
2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2363 2363
@@ -2420,7 +2420,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2420 private = (struct dasd_eckd_private *) cqr->block->base->private; 2420 private = (struct dasd_eckd_private *) cqr->block->base->private;
2421 blksize = cqr->block->bp_block; 2421 blksize = cqr->block->bp_block;
2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2423 recid = req->sector >> cqr->block->s2b_shift; 2423 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2424 ccw = cqr->cpaddr; 2424 ccw = cqr->cpaddr;
2425 /* Skip over define extent & locate record. */ 2425 /* Skip over define extent & locate record. */
2426 ccw++; 2426 ccw++;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a3eb6fd1467..8912358daa2 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
270 return ERR_PTR(-EINVAL); 270 return ERR_PTR(-EINVAL);
271 blksize = block->bp_block; 271 blksize = block->bp_block;
272 /* Calculate record id of first and last block. */ 272 /* Calculate record id of first and last block. */
273 first_rec = req->sector >> block->s2b_shift; 273 first_rec = blk_rq_pos(req) >> block->s2b_shift;
274 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 274 last_rec =
275 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
275 /* Check struct bio and count the number of blocks for the request. */ 276 /* Check struct bio and count the number of blocks for the request. */
276 count = 0; 277 count = 0;
277 cidaw = 0; 278 cidaw = 0;
@@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
309 ccw = cqr->cpaddr; 310 ccw = cqr->cpaddr;
310 /* First ccw is define extent. */ 311 /* First ccw is define extent. */
311 define_extent(ccw++, cqr->data, rq_data_dir(req), 312 define_extent(ccw++, cqr->data, rq_data_dir(req),
312 block->bp_block, req->sector, req->nr_sectors); 313 block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
313 /* Build locate_record + read/write ccws. */ 314 /* Build locate_record + read/write ccws. */
314 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 315 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
315 LO_data = (struct LO_fba_data *) (idaws + cidaw); 316 LO_data = (struct LO_fba_data *) (idaws + cidaw);
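
The DASD build_cp hunks all compute the same record range from the new accessors. A sketch of that calculation, with s2b_shift standing in for the device's sectors-to-blocks shift:

#include <linux/blkdev.h>

static void request_record_range(struct request *req, unsigned int s2b_shift,
                                 sector_t *first_rec, sector_t *last_rec)
{
        *first_rec = blk_rq_pos(req) >> s2b_shift;
        *last_rec  = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> s2b_shift;
}
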
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5f8e8ef43dd..2d00a383a47 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1134 /* Setup ccws. */ 1134 /* Setup ccws. */
1135 request->op = TO_BLOCK; 1135 request->op = TO_BLOCK;
1136 start_block = (struct tape_34xx_block_id *) request->cpdata; 1136 start_block = (struct tape_34xx_block_id *) request->cpdata;
1137 start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B; 1137 start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
1138 DBF_EVENT(6, "start_block = %i\n", start_block->block); 1138 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1139 1139
1140 ccw = request->cpaddr; 1140 ccw = request->cpaddr;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 823b05bd0dd..c453b2f3e9f 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
633 struct req_iterator iter; 633 struct req_iterator iter;
634 634
635 DBF_EVENT(6, "xBREDid:"); 635 DBF_EVENT(6, "xBREDid:");
636 start_block = req->sector >> TAPEBLOCK_HSEC_S2B; 636 start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
637 DBF_EVENT(6, "start_block = %i\n", start_block); 637 DBF_EVENT(6, "start_block = %i\n", start_block);
638 638
639 rq_for_each_segment(bv, req, iter) 639 rq_for_each_segment(bv, req, iter)
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f32e89e7c4f..1e796767598 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,13 +74,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static void 76static void
77tapeblock_end_request(struct request *req, int error)
78{
79 if (blk_end_request(req, error, blk_rq_bytes(req)))
80 BUG();
81}
82
83static void
84__tapeblock_end_request(struct tape_request *ccw_req, void *data) 77__tapeblock_end_request(struct tape_request *ccw_req, void *data)
85{ 78{
86 struct tape_device *device; 79 struct tape_device *device;
@@ -90,17 +83,17 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
90 83
91 device = ccw_req->device; 84 device = ccw_req->device;
92 req = (struct request *) data; 85 req = (struct request *) data;
93 tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO); 86 blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
94 if (ccw_req->rc == 0) 87 if (ccw_req->rc == 0)
95 /* Update position. */ 88 /* Update position. */
96 device->blk_data.block_position = 89 device->blk_data.block_position =
97 (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B; 90 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
98 else 91 else
99 /* We lost the position information due to an error. */ 92 /* We lost the position information due to an error. */
100 device->blk_data.block_position = -1; 93 device->blk_data.block_position = -1;
101 device->discipline->free_bread(ccw_req); 94 device->discipline->free_bread(ccw_req);
102 if (!list_empty(&device->req_queue) || 95 if (!list_empty(&device->req_queue) ||
103 elv_next_request(device->blk_data.request_queue)) 96 blk_peek_request(device->blk_data.request_queue))
104 tapeblock_trigger_requeue(device); 97 tapeblock_trigger_requeue(device);
105} 98}
106 99
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
118 ccw_req = device->discipline->bread(device, req); 111 ccw_req = device->discipline->bread(device, req);
119 if (IS_ERR(ccw_req)) { 112 if (IS_ERR(ccw_req)) {
120 DBF_EVENT(1, "TBLOCK: bread failed\n"); 113 DBF_EVENT(1, "TBLOCK: bread failed\n");
121 tapeblock_end_request(req, -EIO); 114 blk_end_request_all(req, -EIO);
122 return PTR_ERR(ccw_req); 115 return PTR_ERR(ccw_req);
123 } 116 }
124 ccw_req->callback = __tapeblock_end_request; 117 ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
131 * Start/enqueueing failed. No retries in 124 * Start/enqueueing failed. No retries in
132 * this case. 125 * this case.
133 */ 126 */
134 tapeblock_end_request(req, -EIO); 127 blk_end_request_all(req, -EIO);
135 device->discipline->free_bread(ccw_req); 128 device->discipline->free_bread(ccw_req);
136 } 129 }
137 130
@@ -169,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) {
169 spin_lock_irq(&device->blk_data.request_queue_lock); 162 spin_lock_irq(&device->blk_data.request_queue_lock);
170 while ( 163 while (
171 !blk_queue_plugged(queue) && 164 !blk_queue_plugged(queue) &&
172 elv_next_request(queue) && 165 (req = blk_fetch_request(queue)) &&
173 nr_queued < TAPEBLOCK_MIN_REQUEUE 166 nr_queued < TAPEBLOCK_MIN_REQUEUE
174 ) { 167 ) {
175 req = elv_next_request(queue);
176 if (rq_data_dir(req) == WRITE) { 168 if (rq_data_dir(req) == WRITE) {
177 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 169 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
178 blkdev_dequeue_request(req);
179 spin_unlock_irq(&device->blk_data.request_queue_lock); 170 spin_unlock_irq(&device->blk_data.request_queue_lock);
180 tapeblock_end_request(req, -EIO); 171 blk_end_request_all(req, -EIO);
181 spin_lock_irq(&device->blk_data.request_queue_lock); 172 spin_lock_irq(&device->blk_data.request_queue_lock);
182 continue; 173 continue;
183 } 174 }
184 blkdev_dequeue_request(req);
185 nr_queued++; 175 nr_queued++;
186 spin_unlock_irq(&device->blk_data.request_queue_lock); 176 spin_unlock_irq(&device->blk_data.request_queue_lock);
187 rc = tapeblock_start_request(device, req); 177 rc = tapeblock_start_request(device, req);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a85ad05e854..6d465168468 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -186,31 +186,31 @@ static void jsfd_do_request(struct request_queue *q)
186{ 186{
187 struct request *req; 187 struct request *req;
188 188
189 while ((req = elv_next_request(q)) != NULL) { 189 req = blk_fetch_request(q);
190 while (req) {
190 struct jsfd_part *jdp = req->rq_disk->private_data; 191 struct jsfd_part *jdp = req->rq_disk->private_data;
191 unsigned long offset = req->sector << 9; 192 unsigned long offset = blk_rq_pos(req) << 9;
192 size_t len = req->current_nr_sectors << 9; 193 size_t len = blk_rq_cur_bytes(req);
194 int err = -EIO;
193 195
194 if ((offset + len) > jdp->dsize) { 196 if ((offset + len) > jdp->dsize)
195 end_request(req, 0); 197 goto end;
196 continue;
197 }
198 198
199 if (rq_data_dir(req) != READ) { 199 if (rq_data_dir(req) != READ) {
200 printk(KERN_ERR "jsfd: write\n"); 200 printk(KERN_ERR "jsfd: write\n");
201 end_request(req, 0); 201 goto end;
202 continue;
203 } 202 }
204 203
205 if ((jdp->dbase & 0xff000000) != 0x20000000) { 204 if ((jdp->dbase & 0xff000000) != 0x20000000) {
206 printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase); 205 printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
207 end_request(req, 0); 206 goto end;
208 continue;
209 } 207 }
210 208
211 jsfd_read(req->buffer, jdp->dbase + offset, len); 209 jsfd_read(req->buffer, jdp->dbase + offset, len);
212 210 err = 0;
213 end_request(req, 1); 211 end:
212 if (!__blk_end_request_cur(req, err))
213 req = blk_fetch_request(q);
214 } 214 }
215} 215}
216 216
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b..c7076ce25e2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
1825 if (linked_comm && SCpnt->device->queue_depth > 2 1825 if (linked_comm && SCpnt->device->queue_depth > 2
1826 && TLDEV(SCpnt->device->type)) { 1826 && TLDEV(SCpnt->device->type)) {
1827 ha->cp_stat[i] = READY; 1827 ha->cp_stat[i] = READY;
1828 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0); 1828 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
1829 return 0; 1829 return 0;
1830 } 1830 }
1831 1831
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2144 if (!cpp->din) 2144 if (!cpp->din)
2145 input_only = 0; 2145 input_only = 0;
2146 2146
2147 if (SCpnt->request->sector < minsec) 2147 if (blk_rq_pos(SCpnt->request) < minsec)
2148 minsec = SCpnt->request->sector; 2148 minsec = blk_rq_pos(SCpnt->request);
2149 if (SCpnt->request->sector > maxsec) 2149 if (blk_rq_pos(SCpnt->request) > maxsec)
2150 maxsec = SCpnt->request->sector; 2150 maxsec = blk_rq_pos(SCpnt->request);
2151 2151
2152 sl[n] = SCpnt->request->sector; 2152 sl[n] = blk_rq_pos(SCpnt->request);
2153 ioseek += SCpnt->request->nr_sectors; 2153 ioseek += blk_rq_sectors(SCpnt->request);
2154 2154
2155 if (!n) 2155 if (!n)
2156 continue; 2156 continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2190 k = il[n]; 2190 k = il[n];
2191 cpp = &ha->cp[k]; 2191 cpp = &ha->cp[k];
2192 SCpnt = cpp->SCpnt; 2192 SCpnt = cpp->SCpnt;
2193 ll[n] = SCpnt->request->nr_sectors; 2193 ll[n] = blk_rq_sectors(SCpnt->request);
2194 pl[n] = SCpnt->serial_number; 2194 pl[n] = SCpnt->serial_number;
2195 2195
2196 if (!n) 2196 if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2236 cpp = &ha->cp[k]; 2236 cpp = &ha->cp[k];
2237 SCpnt = cpp->SCpnt; 2237 SCpnt = cpp->SCpnt;
2238 scmd_printk(KERN_INFO, SCpnt, 2238 scmd_printk(KERN_INFO, SCpnt,
2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld" 2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2241 (ihdlr ? "ihdlr" : "qcomm"), 2241 (ihdlr ? "ihdlr" : "qcomm"),
2242 SCpnt->serial_number, k, flushcount, 2242 SCpnt->serial_number, k, flushcount,
2243 n_ready, SCpnt->request->sector, 2243 n_ready, blk_rq_pos(SCpnt->request),
2244 SCpnt->request->nr_sectors, cursec, YESNO(s), 2244 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2245 YESNO(r), YESNO(rev), YESNO(input_only), 2245 YESNO(r), YESNO(rev), YESNO(input_only),
2246 YESNO(overlap), cpp->din); 2246 YESNO(overlap), cpp->din);
2247 } 2247 }
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2408 2408
2409 if (linked_comm && SCpnt->device->queue_depth > 2 2409 if (linked_comm && SCpnt->device->queue_depth > 2
2410 && TLDEV(SCpnt->device->type)) 2410 && TLDEV(SCpnt->device->type))
2411 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1); 2411 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
2412 2412
2413 tstatus = status_byte(spp->target_status); 2413 tstatus = status_byte(spp->target_status);
2414 2414
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e43678..54fa1e42dc4 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1927 /* do we need to support multiple segments? */ 1927 /* do we need to support multiple segments? */
1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1930 __func__, req->bio->bi_vcnt, req->data_len, 1930 __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1931 rsp->bio->bi_vcnt, rsp->data_len); 1931 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1932 return -EINVAL; 1932 return -EINVAL;
1933 } 1933 }
1934 1934
1935 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1935 ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
1936 bio_data(rsp->bio), rsp->data_len); 1936 bio_data(rsp->bio), blk_rq_bytes(rsp));
1937 if (ret > 0) { 1937 if (ret > 0) {
1938 /* positive number is the untransferred residual */ 1938 /* positive number is the untransferred residual */
1939 rsp->data_len = ret; 1939 rsp->resid_len = ret;
1940 req->data_len = 0; 1940 req->resid_len = 0;
1941 ret = 0; 1941 ret = 0;
1942 } else if (ret == 0) { 1942 } else if (ret == 0) {
1943 rsp->data_len = 0; 1943 rsp->resid_len = 0;
1944 req->data_len = 0; 1944 req->resid_len = 0;
1945 } 1945 }
1946 1946
1947 return ret; 1947 return ret;
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48..1bc3b756799 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
134{ 134{
135 u8 *req_data = NULL, *resp_data = NULL, *buf; 135 u8 *req_data = NULL, *resp_data = NULL, *buf;
136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
137 int error = -EINVAL, resp_data_len = rsp->data_len; 137 int error = -EINVAL;
138 138
139 /* eight is the minimum size for request and response frames */ 139 /* eight is the minimum size for request and response frames */
140 if (req->data_len < 8 || rsp->data_len < 8) 140 if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
141 goto out; 141 goto out;
142 142
143 if (bio_offset(req->bio) + req->data_len > PAGE_SIZE || 143 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
144 bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) { 144 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
145 shost_printk(KERN_ERR, shost, 145 shost_printk(KERN_ERR, shost,
146 "SMP request/response frame crosses page boundary"); 146 "SMP request/response frame crosses page boundary");
147 goto out; 147 goto out;
148 } 148 }
149 149
150 req_data = kzalloc(req->data_len, GFP_KERNEL); 150 req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
151 151
152 /* make sure frame can always be built ... we copy 152 /* make sure frame can always be built ... we copy
153 * back only the requested length */ 153 * back only the requested length */
154 resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL); 154 resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
155 155
156 if (!req_data || !resp_data) { 156 if (!req_data || !resp_data) {
157 error = -ENOMEM; 157 error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
160 160
161 local_irq_disable(); 161 local_irq_disable();
162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
163 memcpy(req_data, buf, req->data_len); 163 memcpy(req_data, buf, blk_rq_bytes(req));
164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
165 local_irq_enable(); 165 local_irq_enable();
166 166
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
178 178
179 switch (req_data[1]) { 179 switch (req_data[1]) {
180 case SMP_REPORT_GENERAL: 180 case SMP_REPORT_GENERAL:
181 req->data_len -= 8; 181 req->resid_len -= 8;
182 resp_data_len -= 32; 182 rsp->resid_len -= 32;
183 resp_data[2] = SMP_RESP_FUNC_ACC; 183 resp_data[2] = SMP_RESP_FUNC_ACC;
184 resp_data[9] = sas_ha->num_phys; 184 resp_data[9] = sas_ha->num_phys;
185 break; 185 break;
186 186
187 case SMP_REPORT_MANUF_INFO: 187 case SMP_REPORT_MANUF_INFO:
188 req->data_len -= 8; 188 req->resid_len -= 8;
189 resp_data_len -= 64; 189 rsp->resid_len -= 64;
190 resp_data[2] = SMP_RESP_FUNC_ACC; 190 resp_data[2] = SMP_RESP_FUNC_ACC;
191 memcpy(resp_data + 12, shost->hostt->name, 191 memcpy(resp_data + 12, shost->hostt->name,
192 SAS_EXPANDER_VENDOR_ID_LEN); 192 SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
199 break; 199 break;
200 200
201 case SMP_DISCOVER: 201 case SMP_DISCOVER:
202 req->data_len -= 16; 202 req->resid_len -= 16;
203 if ((int)req->data_len < 0) { 203 if ((int)req->resid_len < 0) {
204 req->data_len = 0; 204 req->resid_len = 0;
205 error = -EINVAL; 205 error = -EINVAL;
206 goto out; 206 goto out;
207 } 207 }
208 resp_data_len -= 56; 208 rsp->resid_len -= 56;
209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]); 209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
210 break; 210 break;
211 211
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
215 break; 215 break;
216 216
217 case SMP_REPORT_PHY_SATA: 217 case SMP_REPORT_PHY_SATA:
218 req->data_len -= 16; 218 req->resid_len -= 16;
219 if ((int)req->data_len < 0) { 219 if ((int)req->resid_len < 0) {
220 req->data_len = 0; 220 req->resid_len = 0;
221 error = -EINVAL; 221 error = -EINVAL;
222 goto out; 222 goto out;
223 } 223 }
224 resp_data_len -= 60; 224 rsp->resid_len -= 60;
225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]); 225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
226 break; 226 break;
227 227
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
238 break; 238 break;
239 239
240 case SMP_PHY_CONTROL: 240 case SMP_PHY_CONTROL:
241 req->data_len -= 44; 241 req->resid_len -= 44;
242 if ((int)req->data_len < 0) { 242 if ((int)req->resid_len < 0) {
243 req->data_len = 0; 243 req->resid_len = 0;
244 error = -EINVAL; 244 error = -EINVAL;
245 goto out; 245 goto out;
246 } 246 }
247 resp_data_len -= 8; 247 rsp->resid_len -= 8;
248 sas_phy_control(sas_ha, req_data[9], req_data[10], 248 sas_phy_control(sas_ha, req_data[9], req_data[10],
249 req_data[32] >> 4, req_data[33] >> 4, 249 req_data[32] >> 4, req_data[33] >> 4,
250 resp_data); 250 resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
261 261
262 local_irq_disable(); 262 local_irq_disable();
263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
264 memcpy(buf, resp_data, rsp->data_len); 264 memcpy(buf, resp_data, blk_rq_bytes(rsp));
265 flush_kernel_dcache_page(bio_page(rsp->bio)); 265 flush_kernel_dcache_page(bio_page(rsp->bio));
266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
267 local_irq_enable(); 267 local_irq_enable();
268 rsp->data_len = resp_data_len;
269 268
270 out: 269 out:
271 kfree(req_data); 270 kfree(req_data);
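
The libsas hunks move residual reporting from data_len to resid_len: the handler shrinks the request-side residual as frame bytes are consumed and the response-side residual as reply bytes are produced. A hedged sketch of that bookkeeping (consume_smp_frame() is an assumed name):

#include <linux/blkdev.h>

static int consume_smp_frame(struct request *req, struct request *rsp,
                             unsigned int req_used, unsigned int rsp_used)
{
        if (req_used > blk_rq_bytes(req) || rsp_used > blk_rq_bytes(rsp))
                return -EINVAL;         /* frame or reply larger than its buffer */

        req->resid_len -= req_used;     /* request bytes consumed */
        rsp->resid_len -= rsp_used;     /* response bytes filled  */
        return 0;
}
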
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c..8032c5adb6a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1312,10 +1312,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1312 uint32_t bgstat = bgf->bgstat; 1312 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0; 1313 uint64_t failing_sector = 0;
1314 1314
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1316 "bgstat=0x%x bghm=0x%x\n", 1316 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm); 1318 blk_rq_sectors(cmd->request), bgstat, bghm);
1319 1319
1320 spin_lock(&_dump_buf_lock); 1320 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) { 1321 if (!_dump_buf_done) {
@@ -2378,15 +2378,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2378 if (cmnd->cmnd[0] == READ_10) 2378 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, " 2380 "9035 BLKGRD: READ @ sector %llu, "
2381 "count %lu\n", 2381 "count %u\n",
2382 (unsigned long long)scsi_get_lba(cmnd), 2382 (unsigned long long)scsi_get_lba(cmnd),
2383 cmnd->request->nr_sectors); 2383 blk_rq_sectors(cmnd->request));
2384 else if (cmnd->cmnd[0] == WRITE_10) 2384 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, " 2386 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n", 2387 "count %u cmd=%p\n",
2388 (unsigned long long)scsi_get_lba(cmnd), 2388 (unsigned long long)scsi_get_lba(cmnd),
2389 cmnd->request->nr_sectors, 2389 blk_rq_sectors(cmnd->request),
2390 cmnd); 2390 cmnd);
2391 2391
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +2406,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2406 if (cmnd->cmnd[0] == READ_10) 2406 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, " 2408 "9040 dbg: READ @ sector %llu, "
2409 "count %lu\n", 2409 "count %u\n",
2410 (unsigned long long)scsi_get_lba(cmnd), 2410 (unsigned long long)scsi_get_lba(cmnd),
2411 cmnd->request->nr_sectors); 2411 blk_rq_sectors(cmnd->request));
2412 else if (cmnd->cmnd[0] == WRITE_10) 2412 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, " 2414 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n", 2415 "count %u cmd=%p\n",
2416 (unsigned long long)scsi_get_lba(cmnd), 2416 (unsigned long long)scsi_get_lba(cmnd),
2417 cmnd->request->nr_sectors, cmnd); 2417 blk_rq_sectors(cmnd->request), cmnd);
2418 else 2418 else
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n"); 2420 "9042 dbg: parser not implemented\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a..5c65da519e3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, " 1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt, 1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
1044 req->data_len, rsp->bio->bi_vcnt, rsp->data_len); 1044 blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1045 return -EINVAL; 1045 return -EINVAL;
1046 } 1046 }
1047 1047
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ?
1105 cpu_to_le64(rphy->identify.sas_address) : 1105 cpu_to_le64(rphy->identify.sas_address) :
1106 cpu_to_le64(ioc->sas_hba.sas_address); 1106 cpu_to_le64(ioc->sas_hba.sas_address);
1107 mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4); 1107 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1108 psge = &mpi_request->SGL; 1108 psge = &mpi_request->SGL;
1109 1109
1110 /* WRITE sgel first */ 1110 /* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1115 req->data_len, PCI_DMA_BIDIRECTIONAL); 1115 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1116 if (!dma_addr_out) { 1116 if (!dma_addr_out) {
1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1118 goto unmap; 1118 goto unmap;
1119 } 1119 }
1120 1120
1121 ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4), 1121 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
1122 dma_addr_out); 1122 dma_addr_out);
1123 1123
1124 /* incr sgel */ 1124 /* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1130 MPI2_SGE_FLAGS_END_OF_LIST); 1130 MPI2_SGE_FLAGS_END_OF_LIST);
1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1133 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1133 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1134 if (!dma_addr_in) { 1134 if (!dma_addr_in) {
1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1136 goto unmap; 1136 goto unmap;
1137 } 1137 }
1138 1138
1139 ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4), 1139 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
1140 dma_addr_in); 1140 dma_addr_in);
1141 1141
1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1170 1170
1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
1172 req->sense_len = sizeof(*mpi_reply); 1172 req->sense_len = sizeof(*mpi_reply);
1173 req->data_len = 0; 1173 req->resid_len = 0;
1174 rsp->data_len -= mpi_reply->ResponseDataLength; 1174 rsp->resid_len -= mpi_reply->ResponseDataLength;
1175
1176 } else { 1175 } else {
1177 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1176 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1178 "%s - no reply\n", ioc->name, __func__)); 1177 "%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1188 1187
1189 unmap: 1188 unmap:
1190 if (dma_addr_out) 1189 if (dma_addr_out)
1191 pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len, 1190 pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
1192 PCI_DMA_BIDIRECTIONAL); 1191 PCI_DMA_BIDIRECTIONAL);
1193 if (dma_addr_in) 1192 if (dma_addr_in)
1194 pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len, 1193 pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
1195 PCI_DMA_BIDIRECTIONAL); 1194 PCI_DMA_BIDIRECTIONAL);
1196 1195
1197 out: 1196 out:
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab..5776b2ab6b1 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -889,26 +889,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
889} 889}
890EXPORT_SYMBOL(osd_req_add_set_attr_list); 890EXPORT_SYMBOL(osd_req_add_set_attr_list);
891 891
892static int _append_map_kern(struct request *req,
893 void *buff, unsigned len, gfp_t flags)
894{
895 struct bio *bio;
896 int ret;
897
898 bio = bio_map_kern(req->q, buff, len, flags);
899 if (IS_ERR(bio)) {
900 OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
901 PTR_ERR(bio));
902 return PTR_ERR(bio);
903 }
904 ret = blk_rq_append_bio(req->q, req, bio);
905 if (ret) {
906 OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
907 bio_put(bio);
908 }
909 return ret;
910}
911
912static int _req_append_segment(struct osd_request *or, 892static int _req_append_segment(struct osd_request *or,
913 unsigned padding, struct _osd_req_data_segment *seg, 893 unsigned padding, struct _osd_req_data_segment *seg,
914 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io) 894 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +904,14 @@ static int _req_append_segment(struct osd_request *or,
924 else 904 else
925 pad_buff = io->pad_buff; 905 pad_buff = io->pad_buff;
926 906
927 ret = _append_map_kern(io->req, pad_buff, padding, 907 ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
928 or->alloc_flags); 908 or->alloc_flags);
929 if (ret) 909 if (ret)
930 return ret; 910 return ret;
931 io->total_bytes += padding; 911 io->total_bytes += padding;
932 } 912 }
933 913
934 ret = _append_map_kern(io->req, seg->buff, seg->total_bytes, 914 ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
935 or->alloc_flags); 915 or->alloc_flags);
936 if (ret) 916 if (ret)
937 return ret; 917 return ret;
@@ -1293,6 +1273,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1293/* 1273/*
1294 * osd_finalize_request and helpers 1274 * osd_finalize_request and helpers
1295 */ 1275 */
1276static struct request *_make_request(struct request_queue *q, bool has_write,
1277 struct _osd_io_info *oii, gfp_t flags)
1278{
1279 if (oii->bio)
1280 return blk_make_request(q, oii->bio, flags);
1281 else {
1282 struct request *req;
1283
1284 req = blk_get_request(q, has_write ? WRITE : READ, flags);
1285 if (unlikely(!req))
1286 return ERR_PTR(-ENOMEM);
1287
1288 return req;
1289 }
1290}
1296 1291
1297static int _init_blk_request(struct osd_request *or, 1292static int _init_blk_request(struct osd_request *or,
1298 bool has_in, bool has_out) 1293 bool has_in, bool has_out)
@@ -1301,11 +1296,13 @@ static int _init_blk_request(struct osd_request *or,
1301 struct scsi_device *scsi_device = or->osd_dev->scsi_device; 1296 struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1302 struct request_queue *q = scsi_device->request_queue; 1297 struct request_queue *q = scsi_device->request_queue;
1303 struct request *req; 1298 struct request *req;
1304 int ret = -ENOMEM; 1299 int ret;
1305 1300
1306 req = blk_get_request(q, has_out, flags); 1301 req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1307 if (!req) 1302 if (IS_ERR(req)) {
1303 ret = PTR_ERR(req);
1308 goto out; 1304 goto out;
1305 }
1309 1306
1310 or->request = req; 1307 or->request = req;
1311 req->cmd_type = REQ_TYPE_BLOCK_PC; 1308 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1318,9 +1315,10 @@ static int _init_blk_request(struct osd_request *or,
1318 or->out.req = req; 1315 or->out.req = req;
1319 if (has_in) { 1316 if (has_in) {
1320 /* allocate bidi request */ 1317 /* allocate bidi request */
1321 req = blk_get_request(q, READ, flags); 1318 req = _make_request(q, false, &or->in, flags);
1322 if (!req) { 1319 if (IS_ERR(req)) {
1323 OSD_DEBUG("blk_get_request for bidi failed\n"); 1320 OSD_DEBUG("blk_get_request for bidi failed\n");
1321 ret = PTR_ERR(req);
1324 goto out; 1322 goto out;
1325 } 1323 }
1326 req->cmd_type = REQ_TYPE_BLOCK_PC; 1324 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1364,26 +1362,6 @@ int osd_finalize_request(struct osd_request *or,
1364 return ret; 1362 return ret;
1365 } 1363 }
1366 1364
1367 if (or->out.bio) {
1368 ret = blk_rq_append_bio(or->request->q, or->out.req,
1369 or->out.bio);
1370 if (ret) {
1371 OSD_DEBUG("blk_rq_append_bio out failed\n");
1372 return ret;
1373 }
1374 OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
1375 _LLU(or->out.total_bytes), or->out.req->data_len);
1376 }
1377 if (or->in.bio) {
1378 ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
1379 if (ret) {
1380 OSD_DEBUG("blk_rq_append_bio in failed\n");
1381 return ret;
1382 }
1383 OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
1384 _LLU(or->in.total_bytes), or->in.req->data_len);
1385 }
1386
1387 or->out.pad_buff = sg_out_pad_buffer; 1365 or->out.pad_buff = sg_out_pad_buffer;
1388 or->in.pad_buff = sg_in_pad_buffer; 1366 or->in.pad_buff = sg_in_pad_buffer;
1389 1367
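
The osd_initiator hunks replace the open-coded bio_map_kern() + blk_rq_append_bio() pair with blk_rq_map_kern(), and build requests from a ready bio chain with blk_make_request(). A sketch combining both, with build_osd_request() as an assumed helper:

#include <linux/err.h>
#include <linux/blkdev.h>

static struct request *build_osd_request(struct request_queue *q,
                                         struct bio *bio, void *hdr,
                                         unsigned int hdr_len, gfp_t gfp)
{
        struct request *req;
        int ret;

        if (bio)
                req = blk_make_request(q, bio, gfp);    /* request from a bio chain */
        else
                req = blk_get_request(q, WRITE, gfp);   /* empty BLOCK_PC request   */
        if (IS_ERR(req))
                return req;
        if (!req)
                return ERR_PTR(-ENOMEM);

        if (hdr_len) {
                /* append a kernel buffer; replaces bio_map_kern + blk_rq_append_bio */
                ret = blk_rq_map_kern(q, req, hdr, hdr_len, gfp);
                if (ret) {
                        blk_put_request(req);
                        return ERR_PTR(ret);
                }
        }
        return req;
}
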
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e9..dd3f9d2b99f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
240 * is invalid. Prevent the garbage from being misinterpreted 240 * is invalid. Prevent the garbage from being misinterpreted
241 * and prevent security leaks by zeroing out the excess data. 241 * and prevent security leaks by zeroing out the excess data.
242 */ 242 */
243 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 243 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 244 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245 245
246 if (resid) 246 if (resid)
247 *resid = req->data_len; 247 *resid = req->resid_len;
248 ret = req->errors; 248 ret = req->errors;
249 out: 249 out:
250 blk_put_request(req); 250 blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
546 * to queue the remainder of them. 546 * to queue the remainder of them.
547 */ 547 */
548 if (blk_end_request(req, error, bytes)) { 548 if (blk_end_request(req, error, bytes)) {
549 int leftover = (req->hard_nr_sectors << 9);
550
551 if (blk_pc_request(req))
552 leftover = req->data_len;
553
554 /* kill remainder if no retrys */ 549 /* kill remainder if no retrys */
555 if (error && scsi_noretry_cmd(cmd)) 550 if (error && scsi_noretry_cmd(cmd))
556 blk_end_request(req, error, leftover); 551 blk_end_request_all(req, error);
557 else { 552 else {
558 if (requeue) { 553 if (requeue) {
559 /* 554 /*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
673EXPORT_SYMBOL(scsi_release_buffers); 668EXPORT_SYMBOL(scsi_release_buffers);
674 669
675/* 670/*
676 * Bidi commands Must be complete as a whole, both sides at once.
677 * If part of the bytes were written and lld returned
678 * scsi_in()->resid and/or scsi_out()->resid this information will be left
679 * in req->data_len and req->next_rq->data_len. The upper-layer driver can
680 * decide what to do with this information.
681 */
682static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
683{
684 struct request *req = cmd->request;
685 unsigned int dlen = req->data_len;
686 unsigned int next_dlen = req->next_rq->data_len;
687
688 req->data_len = scsi_out(cmd)->resid;
689 req->next_rq->data_len = scsi_in(cmd)->resid;
690
691 /* The req and req->next_rq have not been completed */
692 BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
693
694 scsi_release_buffers(cmd);
695
696 /*
697 * This will goose the queue request function at the end, so we don't
698 * need to worry about launching another command.
699 */
700 scsi_next_command(cmd);
701}
702
703/*
704 * Function: scsi_io_completion() 671 * Function: scsi_io_completion()
705 * 672 *
706 * Purpose: Completion processing for block device I/O requests. 673 * Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
739void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 706void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
740{ 707{
741 int result = cmd->result; 708 int result = cmd->result;
742 int this_count;
743 struct request_queue *q = cmd->device->request_queue; 709 struct request_queue *q = cmd->device->request_queue;
744 struct request *req = cmd->request; 710 struct request *req = cmd->request;
745 int error = 0; 711 int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 if (!sense_deferred) 739 if (!sense_deferred)
774 error = -EIO; 740 error = -EIO;
775 } 741 }
742
743 req->resid_len = scsi_get_resid(cmd);
744
776 if (scsi_bidi_cmnd(cmd)) { 745 if (scsi_bidi_cmnd(cmd)) {
777 /* will also release_buffers */ 746 /*
778 scsi_end_bidi_request(cmd); 747 * Bidi commands Must be complete as a whole,
748 * both sides at once.
749 */
750 req->next_rq->resid_len = scsi_in(cmd)->resid;
751
752 blk_end_request_all(req, 0);
753
754 scsi_release_buffers(cmd);
755 scsi_next_command(cmd);
779 return; 756 return;
780 } 757 }
781 req->data_len = scsi_get_resid(cmd);
782 } 758 }
783 759
784 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 760 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
787 * Next deal with any sectors which we were able to correctly 763 * Next deal with any sectors which we were able to correctly
788 * handle. 764 * handle.
789 */ 765 */
790 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 766 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
791 "%d bytes done.\n", 767 "%d bytes done.\n",
792 req->nr_sectors, good_bytes)); 768 blk_rq_sectors(req), good_bytes));
793 769
794 /* 770 /*
795 * Recovered errors need reporting, but they're always treated 771 * Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
812 */ 788 */
813 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 789 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
814 return; 790 return;
815 this_count = blk_rq_bytes(req);
816 791
817 error = -EIO; 792 error = -EIO;
818 793
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
922 if (driver_byte(result) & DRIVER_SENSE) 897 if (driver_byte(result) & DRIVER_SENSE)
923 scsi_print_sense("", cmd); 898 scsi_print_sense("", cmd);
924 } 899 }
925 blk_end_request(req, -EIO, blk_rq_bytes(req)); 900 blk_end_request_all(req, -EIO);
926 scsi_next_command(cmd); 901 scsi_next_command(cmd);
927 break; 902 break;
928 case ACTION_REPREP: 903 case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
965 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 940 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
966 BUG_ON(count > sdb->table.nents); 941 BUG_ON(count > sdb->table.nents);
967 sdb->table.nents = count; 942 sdb->table.nents = count;
968 if (blk_pc_request(req)) 943 sdb->length = blk_rq_bytes(req);
969 sdb->length = req->data_len;
970 else
971 sdb->length = req->nr_sectors << 9;
972 return BLKPREP_OK; 944 return BLKPREP_OK;
973} 945}
974 946
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1087 if (unlikely(ret)) 1059 if (unlikely(ret))
1088 return ret; 1060 return ret;
1089 } else { 1061 } else {
1090 BUG_ON(req->data_len); 1062 BUG_ON(blk_rq_bytes(req));
1091 BUG_ON(req->data);
1092 1063
1093 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1064 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1094 req->buffer = NULL; 1065 req->buffer = NULL;
1095 } 1066 }
1096 1067
1097 cmd->cmd_len = req->cmd_len; 1068 cmd->cmd_len = req->cmd_len;
1098 if (!req->data_len) 1069 if (!blk_rq_bytes(req))
1099 cmd->sc_data_direction = DMA_NONE; 1070 cmd->sc_data_direction = DMA_NONE;
1100 else if (rq_data_dir(req) == WRITE) 1071 else if (rq_data_dir(req) == WRITE)
1101 cmd->sc_data_direction = DMA_TO_DEVICE; 1072 cmd->sc_data_direction = DMA_TO_DEVICE;
1102 else 1073 else
1103 cmd->sc_data_direction = DMA_FROM_DEVICE; 1074 cmd->sc_data_direction = DMA_FROM_DEVICE;
1104 1075
1105 cmd->transfersize = req->data_len; 1076 cmd->transfersize = blk_rq_bytes(req);
1106 cmd->allowed = req->retries; 1077 cmd->allowed = req->retries;
1107 return BLKPREP_OK; 1078 return BLKPREP_OK;
1108} 1079}
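
With req->data_len gone, kernel code that issues a BLOCK_PC request describes its payload through blk_rq_map_kern()/blk_rq_bytes() and reads the residual back from rq->resid_len. A minimal caller-side sketch follows; the function name, the INQUIRY command choice and the error handling are illustrative only, not taken from this patch:

#include <linux/blkdev.h>
#include <scsi/scsi.h>

static int example_inquiry(struct request_queue *q, struct gendisk *disk,
			   void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = INQUIRY;
	rq->cmd[4] = len;		/* assumes len fits in one byte */
	rq->cmd_len = 6;
	rq->timeout = 10 * HZ;

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err) {
		blk_execute_rq(q, disk, rq, 0);
		/* the residual is now reported in resid_len */
		pr_debug("transferred %u of %u bytes\n",
			 len - rq->resid_len, len);
		err = rq->errors ? -EIO : 0;
	}

	blk_put_request(rq);
	return err;
}
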
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1212 break; 1183 break;
1213 case BLKPREP_DEFER: 1184 case BLKPREP_DEFER:
1214 /* 1185 /*
1215 * If we defer, the elv_next_request() returns NULL, but the 1186 * If we defer, the blk_peek_request() returns NULL, but the
1216 * queue must be restarted, so we plug here if no returning 1187 * queue must be restarted, so we plug here if no returning
1217 * command will automatically do that. 1188 * command will automatically do that.
1218 */ 1189 */
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1388 struct scsi_target *starget = scsi_target(sdev); 1359 struct scsi_target *starget = scsi_target(sdev);
1389 struct Scsi_Host *shost = sdev->host; 1360 struct Scsi_Host *shost = sdev->host;
1390 1361
1391 blkdev_dequeue_request(req); 1362 blk_start_request(req);
1392 1363
1393 if (unlikely(cmd == NULL)) { 1364 if (unlikely(cmd == NULL)) {
1394 printk(KERN_CRIT "impossible request in %s.\n", 1365 printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
1480 1451
1481 if (!sdev) { 1452 if (!sdev) {
1482 printk("scsi: killing requests for dead queue\n"); 1453 printk("scsi: killing requests for dead queue\n");
1483 while ((req = elv_next_request(q)) != NULL) 1454 while ((req = blk_peek_request(q)) != NULL)
1484 scsi_kill_request(req, q); 1455 scsi_kill_request(req, q);
1485 return; 1456 return;
1486 } 1457 }
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
1501 * that the request is fully prepared even if we cannot 1472 * that the request is fully prepared even if we cannot
1502 * accept it. 1473 * accept it.
1503 */ 1474 */
1504 req = elv_next_request(q); 1475 req = blk_peek_request(q);
1505 if (!req || !scsi_dev_queue_ready(q, sdev)) 1476 if (!req || !scsi_dev_queue_ready(q, sdev))
1506 break; 1477 break;
1507 1478
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
1517 * Remove the request from the request list. 1488 * Remove the request from the request list.
1518 */ 1489 */
1519 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1490 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1520 blkdev_dequeue_request(req); 1491 blk_start_request(req);
1521 sdev->device_busy++; 1492 sdev->device_busy++;
1522 1493
1523 spin_unlock(q->queue_lock); 1494 spin_unlock(q->queue_lock);
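
The queue handling above is the general conversion recipe for any ->request_fn: elv_next_request() used to return the head request while leaving it queued, and blkdev_dequeue_request() removed it; the same two steps are now blk_peek_request() and blk_start_request(). A rough sketch of the pattern, with the example_* helpers hypothetical:

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* ->request_fn is entered with q->queue_lock held */
	while ((rq = blk_peek_request(q)) != NULL) {
		if (!example_can_issue(q))	/* hypothetical resource check */
			break;			/* leave rq on the queue */

		blk_start_request(rq);		/* dequeue and start timeout handling */

		spin_unlock_irq(q->queue_lock);
		example_issue(rq);		/* hypothetical hardware submission */
		spin_lock_irq(q->queue_lock);
	}
}
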
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6..10303272ba4 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the 387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
388 * length for us. 388 * length for us.
389 */ 389 */
390 cmd->sdb.length = rq->data_len; 390 cmd->sdb.length = blk_rq_bytes(rq);
391 391
392 return 0; 392 return 0;
393 393
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2..d606452297c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
164 164
165 while (!blk_queue_plugged(q)) { 165 while (!blk_queue_plugged(q)) {
166 req = elv_next_request(q); 166 req = blk_fetch_request(q);
167 if (!req) 167 if (!req)
168 break; 168 break;
169 169
170 blkdev_dequeue_request(req);
171
172 spin_unlock_irq(q->queue_lock); 170 spin_unlock_irq(q->queue_lock);
173 171
174 handler = to_sas_internal(shost->transportt)->f->smp_handler; 172 handler = to_sas_internal(shost->transportt)->f->smp_handler;
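
Here the peek/dequeue pair collapses into blk_fetch_request(); judging from the helpers declared in the blkdev.h hunk near the end of the patch, it behaves roughly like the sketch below (hypothetical name, not the real body):

static struct request *example_fetch_request(struct request_queue *q)
{
	struct request *rq = blk_peek_request(q);

	if (rq)
		blk_start_request(rq);
	return rq;
}
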
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b63..40d2860f235 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
384 struct scsi_device *sdp = q->queuedata; 384 struct scsi_device *sdp = q->queuedata;
385 struct gendisk *disk = rq->rq_disk; 385 struct gendisk *disk = rq->rq_disk;
386 struct scsi_disk *sdkp; 386 struct scsi_disk *sdkp;
387 sector_t block = rq->sector; 387 sector_t block = blk_rq_pos(rq);
388 sector_t threshold; 388 sector_t threshold;
389 unsigned int this_count = rq->nr_sectors; 389 unsigned int this_count = blk_rq_sectors(rq);
390 int ret, host_dif; 390 int ret, host_dif;
391 391
392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
413 this_count)); 413 this_count));
414 414
415 if (!sdp || !scsi_device_online(sdp) || 415 if (!sdp || !scsi_device_online(sdp) ||
416 block + rq->nr_sectors > get_capacity(disk)) { 416 block + blk_rq_sectors(rq) > get_capacity(disk)) {
417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
418 "Finishing %ld sectors\n", 418 "Finishing %u sectors\n",
419 rq->nr_sectors)); 419 blk_rq_sectors(rq)));
420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
421 "Retry with 0x%p\n", SCpnt)); 421 "Retry with 0x%p\n", SCpnt));
422 goto out; 422 goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
463 * for this. 463 * for this.
464 */ 464 */
465 if (sdp->sector_size == 1024) { 465 if (sdp->sector_size == 1024) {
466 if ((block & 1) || (rq->nr_sectors & 1)) { 466 if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
467 scmd_printk(KERN_ERR, SCpnt, 467 scmd_printk(KERN_ERR, SCpnt,
468 "Bad block number requested\n"); 468 "Bad block number requested\n");
469 goto out; 469 goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
473 } 473 }
474 } 474 }
475 if (sdp->sector_size == 2048) { 475 if (sdp->sector_size == 2048) {
476 if ((block & 3) || (rq->nr_sectors & 3)) { 476 if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
477 scmd_printk(KERN_ERR, SCpnt, 477 scmd_printk(KERN_ERR, SCpnt,
478 "Bad block number requested\n"); 478 "Bad block number requested\n");
479 goto out; 479 goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
483 } 483 }
484 } 484 }
485 if (sdp->sector_size == 4096) { 485 if (sdp->sector_size == 4096) {
486 if ((block & 7) || (rq->nr_sectors & 7)) { 486 if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
487 scmd_printk(KERN_ERR, SCpnt, 487 scmd_printk(KERN_ERR, SCpnt,
488 "Bad block number requested\n"); 488 "Bad block number requested\n");
489 goto out; 489 goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
512 } 512 }
513 513
514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
515 "%s %d/%ld 512 byte blocks.\n", 515 "%s %d/%u 512 byte blocks.\n",
516 (rq_data_dir(rq) == WRITE) ? 516 (rq_data_dir(rq) == WRITE) ?
517 "writing" : "reading", this_count, 517 "writing" : "reading", this_count,
518 rq->nr_sectors)); 518 blk_rq_sectors(rq)));
519 519
520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
971 971
972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
973{ 973{
974 u64 start_lba = scmd->request->sector; 974 u64 start_lba = blk_rq_pos(scmd->request);
975 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 975 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
976 u64 bad_lba; 976 u64 bad_lba;
977 int info_valid; 977 int info_valid;
978 978
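
The sd changes are a direct substitution; the mapping used throughout this series follows from the include/linux/blkdev.h hunk near the end of the patch:

/*
 *   rq->sector               ->  blk_rq_pos(rq)
 *   rq->nr_sectors           ->  blk_rq_sectors(rq)
 *   rq->current_nr_sectors   ->  blk_rq_cur_sectors(rq)
 *   rq->data_len             ->  blk_rq_bytes(rq)
 *   residual count           ->  rq->resid_len (output only)
 *   rq->hard_*               ->  removed; __sector and __data_len are
 *                                block-layer internals, never touched by drivers
 */
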
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff49279..82f14a9482d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
507 sector_sz = scmd->device->sector_size; 507 sector_sz = scmd->device->sector_size;
508 sectors = good_bytes / sector_sz; 508 sectors = good_bytes / sector_sz;
509 509
510 phys = scmd->request->sector & 0xffffffff; 510 phys = blk_rq_pos(scmd->request) & 0xffffffff;
511 if (sector_sz == 4096) 511 if (sector_sz == 4096)
512 phys >>= 3; 512 phys >>= 3;
513 513
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e1716f14cd4..0fc2c0ae769 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1260,7 +1260,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1260 1260
1261 sense = rq->sense; 1261 sense = rq->sense;
1262 result = rq->errors; 1262 result = rq->errors;
1263 resid = rq->data_len; 1263 resid = rq->resid_len;
1264 1264
1265 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1265 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1266 sdp->disk->disk_name, srp->header.pack_id, result)); 1266 sdp->disk->disk_name, srp->header.pack_id, result));
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad..fddba53c7fe 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
292 if (cd->device->sector_size == 2048) 292 if (cd->device->sector_size == 2048)
293 error_sector <<= 2; 293 error_sector <<= 2;
294 error_sector &= ~(block_sectors - 1); 294 error_sector &= ~(block_sectors - 1);
295 good_bytes = (error_sector - SCpnt->request->sector) << 9; 295 good_bytes = (error_sector -
296 blk_rq_pos(SCpnt->request)) << 9;
296 if (good_bytes < 0 || good_bytes >= this_count) 297 if (good_bytes < 0 || good_bytes >= this_count)
297 good_bytes = 0; 298 good_bytes = 0;
298 /* 299 /*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
349 cd->disk->disk_name, block)); 350 cd->disk->disk_name, block));
350 351
351 if (!cd->device || !scsi_device_online(cd->device)) { 352 if (!cd->device || !scsi_device_online(cd->device)) {
352 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 353 SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
353 rq->nr_sectors)); 354 blk_rq_sectors(rq)));
354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 355 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
355 goto out; 356 goto out;
356 } 357 }
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
413 /* 414 /*
414 * request doesn't start on hw block boundary, add scatter pads 415 * request doesn't start on hw block boundary, add scatter pads
415 */ 416 */
416 if (((unsigned int)rq->sector % (s_size >> 9)) || 417 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
417 (scsi_bufflen(SCpnt) % s_size)) { 418 (scsi_bufflen(SCpnt) % s_size)) {
418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); 419 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
419 goto out; 420 goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); 423 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
423 424
424 425
425 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
426 cd->cdi.name, 427 cd->cdi.name,
427 (rq_data_dir(rq) == WRITE) ? 428 (rq_data_dir(rq) == WRITE) ?
428 "writing" : "reading", 429 "writing" : "reading",
429 this_count, rq->nr_sectors)); 430 this_count, blk_rq_sectors(rq)));
430 431
431 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
432 block = (unsigned int)rq->sector / (s_size >> 9); 433 block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
433 434
434 if (this_count > 0xffff) { 435 if (this_count > 0xffff) {
435 this_count = 0xffff; 436 this_count = 0xffff;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f1..8681b708344 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
463 struct scsi_tape *STp = SRpnt->stp; 463 struct scsi_tape *STp = SRpnt->stp;
464 464
465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
466 STp->buffer->cmdstat.residual = req->data_len; 466 STp->buffer->cmdstat.residual = req->resid_len;
467 467
468 if (SRpnt->waiting) 468 if (SRpnt->waiting)
469 complete(SRpnt->waiting); 469 complete(SRpnt->waiting);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cb..54023d41fd1 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
1306 if (linked_comm && SCpnt->device->queue_depth > 2 1306 if (linked_comm && SCpnt->device->queue_depth > 2
1307 && TLDEV(SCpnt->device->type)) { 1307 && TLDEV(SCpnt->device->type)) {
1308 HD(j)->cp_stat[i] = READY; 1308 HD(j)->cp_stat[i] = READY;
1309 flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE); 1309 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
1310 return 0; 1310 return 0;
1311 } 1311 }
1312 1312
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
1610 1610
1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE; 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
1612 1612
1613 if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector; 1613 if (blk_rq_pos(SCpnt->request) < minsec)
1614 if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector; 1614 minsec = blk_rq_pos(SCpnt->request);
1615 if (blk_rq_pos(SCpnt->request) > maxsec)
1616 maxsec = blk_rq_pos(SCpnt->request);
1615 1617
1616 sl[n] = SCpnt->request->sector; 1618 sl[n] = blk_rq_pos(SCpnt->request);
1617 ioseek += SCpnt->request->nr_sectors; 1619 ioseek += blk_rq_sectors(SCpnt->request);
1618 1620
1619 if (!n) continue; 1621 if (!n) continue;
1620 1622
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
1642 1644
1643 if (!input_only) for (n = 0; n < n_ready; n++) { 1645 if (!input_only) for (n = 0; n < n_ready; n++) {
1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1646 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1645 ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number; 1647 ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
1646 1648
1647 if (!n) continue; 1649 if (!n) continue;
1648 1650
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
1666 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1668 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1667 for (n = 0; n < n_ready; n++) { 1669 for (n = 0; n < n_ready; n++) {
1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1670 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1669 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\ 1671 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1672 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1671 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1673 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1674 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
1673 SCpnt->request->sector, SCpnt->request->nr_sectors, cursec, 1675 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1674 YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1676 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1675 YESNO(overlap), cpp->xdir); 1677 YESNO(overlap), cpp->xdir);
1676 } 1678 }
1677#endif 1679#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
1799 1801
1800 if (linked_comm && SCpnt->device->queue_depth > 2 1802 if (linked_comm && SCpnt->device->queue_depth > 2
1801 && TLDEV(SCpnt->device->type)) 1803 && TLDEV(SCpnt->device->type))
1802 flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE); 1804 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1803 1805
1804 tstatus = status_byte(spp->target_status); 1806 tstatus = status_byte(spp->target_status);
1805 1807
diff --git a/fs/bio.c b/fs/bio.c
index 98711647ece..81dc93e7253 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1201,7 +1201,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
1201 char *addr = page_address(bvec->bv_page); 1201 char *addr = page_address(bvec->bv_page);
1202 int len = bmd->iovecs[i].bv_len; 1202 int len = bmd->iovecs[i].bv_len;
1203 1203
1204 if (read && !err) 1204 if (read)
1205 memcpy(p, addr, len); 1205 memcpy(p, addr, len);
1206 1206
1207 __free_page(bvec->bv_page); 1207 __free_page(bvec->bv_page);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f45dbc18dd1..a85fe310fc6 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -331,6 +331,12 @@ static int blkdev_readpage(struct file * file, struct page * page)
331 return block_read_full_page(page, blkdev_get_block); 331 return block_read_full_page(page, blkdev_get_block);
332} 332}
333 333
334static int blkdev_readpages(struct file *file, struct address_space *mapping,
335 struct list_head *pages, unsigned nr_pages)
336{
337 return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
338}
339
334static int blkdev_write_begin(struct file *file, struct address_space *mapping, 340static int blkdev_write_begin(struct file *file, struct address_space *mapping,
335 loff_t pos, unsigned len, unsigned flags, 341 loff_t pos, unsigned len, unsigned flags,
336 struct page **pagep, void **fsdata) 342 struct page **pagep, void **fsdata)
@@ -1399,6 +1405,7 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
1399 1405
1400static const struct address_space_operations def_blk_aops = { 1406static const struct address_space_operations def_blk_aops = {
1401 .readpage = blkdev_readpage, 1407 .readpage = blkdev_readpage,
1408 .readpages = blkdev_readpages,
1402 .writepage = blkdev_writepage, 1409 .writepage = blkdev_writepage,
1403 .sync_page = block_sync_page, 1410 .sync_page = block_sync_page,
1404 .write_begin = blkdev_write_begin, 1411 .write_begin = blkdev_write_begin,
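
Adding a ->readpages method lets the block device node use multi-page readahead instead of one blkdev_readpage() call per page. The same wiring works for any simple get_block-based filesystem; a sketch under that assumption, with every examplefs_* name hypothetical:

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>

/* hypothetical get_block_t for this sketch */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create);

static int examplefs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, examplefs_get_block);
}

static int examplefs_readpages(struct file *file, struct address_space *mapping,
			       struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops = {
	.readpage	= examplefs_readpage,
	.readpages	= examplefs_readpages,
};
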
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 6a347fbc998..ffd42815fda 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -47,6 +47,8 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
47 struct pipe_inode_info *pipe, size_t count, 47 struct pipe_inode_info *pipe, size_t count,
48 unsigned int flags) 48 unsigned int flags)
49{ 49{
50 ssize_t (*splice_read)(struct file *, loff_t *,
51 struct pipe_inode_info *, size_t, unsigned int);
50 struct coda_file_info *cfi; 52 struct coda_file_info *cfi;
51 struct file *host_file; 53 struct file *host_file;
52 54
@@ -54,10 +56,11 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
54 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); 56 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
55 host_file = cfi->cfi_container; 57 host_file = cfi->cfi_container;
56 58
57 if (!host_file->f_op || !host_file->f_op->splice_read) 59 splice_read = host_file->f_op->splice_read;
58 return -EINVAL; 60 if (!splice_read)
61 splice_read = default_file_splice_read;
59 62
60 return host_file->f_op->splice_read(host_file, ppos, pipe, count,flags); 63 return splice_read(host_file, ppos, pipe, count, flags);
61} 64}
62 65
63static ssize_t 66static ssize_t
diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c
index b249ae97fb1..06ca92672eb 100644
--- a/fs/exofs/osd.c
+++ b/fs/exofs/osd.c
@@ -50,10 +50,10 @@ int exofs_check_ok_resid(struct osd_request *or, u64 *in_resid, u64 *out_resid)
50 50
51 /* FIXME: should be include in osd_sense_info */ 51 /* FIXME: should be include in osd_sense_info */
52 if (in_resid) 52 if (in_resid)
53 *in_resid = or->in.req ? or->in.req->data_len : 0; 53 *in_resid = or->in.req ? or->in.req->resid_len : 0;
54 54
55 if (out_resid) 55 if (out_resid)
56 *out_resid = or->out.req ? or->out.req->data_len : 0; 56 *out_resid = or->out.req ? or->out.req->resid_len : 0;
57 57
58 return ret; 58 return ret;
59} 59}
diff --git a/fs/pipe.c b/fs/pipe.c
index 13414ec45b8..f7dd21ad85a 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -302,6 +302,20 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *info,
302 return 0; 302 return 0;
303} 303}
304 304
305/**
306 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
307 * @pipe: the pipe that the buffer belongs to
308 * @buf: the buffer to put a reference to
309 *
310 * Description:
311 * This function releases a reference to @buf.
312 */
313void generic_pipe_buf_release(struct pipe_inode_info *pipe,
314 struct pipe_buffer *buf)
315{
316 page_cache_release(buf->page);
317}
318
305static const struct pipe_buf_operations anon_pipe_buf_ops = { 319static const struct pipe_buf_operations anon_pipe_buf_ops = {
306 .can_merge = 1, 320 .can_merge = 1,
307 .map = generic_pipe_buf_map, 321 .map = generic_pipe_buf_map,
diff --git a/fs/read_write.c b/fs/read_write.c
index 9d1e76bb9ee..6c8c55dec2b 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -805,12 +805,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
805 goto out; 805 goto out;
806 if (!(in_file->f_mode & FMODE_READ)) 806 if (!(in_file->f_mode & FMODE_READ))
807 goto fput_in; 807 goto fput_in;
808 retval = -EINVAL;
809 in_inode = in_file->f_path.dentry->d_inode;
810 if (!in_inode)
811 goto fput_in;
812 if (!in_file->f_op || !in_file->f_op->splice_read)
813 goto fput_in;
814 retval = -ESPIPE; 808 retval = -ESPIPE;
815 if (!ppos) 809 if (!ppos)
816 ppos = &in_file->f_pos; 810 ppos = &in_file->f_pos;
@@ -834,6 +828,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
834 retval = -EINVAL; 828 retval = -EINVAL;
835 if (!out_file->f_op || !out_file->f_op->sendpage) 829 if (!out_file->f_op || !out_file->f_op->sendpage)
836 goto fput_out; 830 goto fput_out;
831 in_inode = in_file->f_path.dentry->d_inode;
837 out_inode = out_file->f_path.dentry->d_inode; 832 out_inode = out_file->f_path.dentry->d_inode;
838 retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count); 833 retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
839 if (retval < 0) 834 if (retval < 0)
diff --git a/fs/splice.c b/fs/splice.c
index 666953d59a3..73766d24f97 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -507,9 +507,131 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
507 507
508 return ret; 508 return ret;
509} 509}
510
511EXPORT_SYMBOL(generic_file_splice_read); 510EXPORT_SYMBOL(generic_file_splice_read);
512 511
512static const struct pipe_buf_operations default_pipe_buf_ops = {
513 .can_merge = 0,
514 .map = generic_pipe_buf_map,
515 .unmap = generic_pipe_buf_unmap,
516 .confirm = generic_pipe_buf_confirm,
517 .release = generic_pipe_buf_release,
518 .steal = generic_pipe_buf_steal,
519 .get = generic_pipe_buf_get,
520};
521
522static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
523 unsigned long vlen, loff_t offset)
524{
525 mm_segment_t old_fs;
526 loff_t pos = offset;
527 ssize_t res;
528
529 old_fs = get_fs();
530 set_fs(get_ds());
531 /* The cast to a user pointer is valid due to the set_fs() */
532 res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
533 set_fs(old_fs);
534
535 return res;
536}
537
538static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
539 loff_t pos)
540{
541 mm_segment_t old_fs;
542 ssize_t res;
543
544 old_fs = get_fs();
545 set_fs(get_ds());
546 /* The cast to a user pointer is valid due to the set_fs() */
547 res = vfs_write(file, (const char __user *)buf, count, &pos);
548 set_fs(old_fs);
549
550 return res;
551}
552
553ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
554 struct pipe_inode_info *pipe, size_t len,
555 unsigned int flags)
556{
557 unsigned int nr_pages;
558 unsigned int nr_freed;
559 size_t offset;
560 struct page *pages[PIPE_BUFFERS];
561 struct partial_page partial[PIPE_BUFFERS];
562 struct iovec vec[PIPE_BUFFERS];
563 pgoff_t index;
564 ssize_t res;
565 size_t this_len;
566 int error;
567 int i;
568 struct splice_pipe_desc spd = {
569 .pages = pages,
570 .partial = partial,
571 .flags = flags,
572 .ops = &default_pipe_buf_ops,
573 .spd_release = spd_release_page,
574 };
575
576 index = *ppos >> PAGE_CACHE_SHIFT;
577 offset = *ppos & ~PAGE_CACHE_MASK;
578 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
579
580 for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) {
581 struct page *page;
582
583 page = alloc_page(GFP_USER);
584 error = -ENOMEM;
585 if (!page)
586 goto err;
587
588 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
589 vec[i].iov_base = (void __user *) page_address(page);
590 vec[i].iov_len = this_len;
591 pages[i] = page;
592 spd.nr_pages++;
593 len -= this_len;
594 offset = 0;
595 }
596
597 res = kernel_readv(in, vec, spd.nr_pages, *ppos);
598 if (res < 0) {
599 error = res;
600 goto err;
601 }
602
603 error = 0;
604 if (!res)
605 goto err;
606
607 nr_freed = 0;
608 for (i = 0; i < spd.nr_pages; i++) {
609 this_len = min_t(size_t, vec[i].iov_len, res);
610 partial[i].offset = 0;
611 partial[i].len = this_len;
612 if (!this_len) {
613 __free_page(pages[i]);
614 pages[i] = NULL;
615 nr_freed++;
616 }
617 res -= this_len;
618 }
619 spd.nr_pages -= nr_freed;
620
621 res = splice_to_pipe(pipe, &spd);
622 if (res > 0)
623 *ppos += res;
624
625 return res;
626
627err:
628 for (i = 0; i < spd.nr_pages; i++)
629 __free_page(pages[i]);
630
631 return error;
632}
633EXPORT_SYMBOL(default_file_splice_read);
634
513/* 635/*
514 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' 636 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
515 * using sendpage(). Return the number of bytes sent. 637 * using sendpage(). Return the number of bytes sent.
@@ -881,6 +1003,36 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
881 1003
882EXPORT_SYMBOL(generic_file_splice_write); 1004EXPORT_SYMBOL(generic_file_splice_write);
883 1005
1006static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1007 struct splice_desc *sd)
1008{
1009 int ret;
1010 void *data;
1011
1012 ret = buf->ops->confirm(pipe, buf);
1013 if (ret)
1014 return ret;
1015
1016 data = buf->ops->map(pipe, buf, 0);
1017 ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
1018 buf->ops->unmap(pipe, buf, data);
1019
1020 return ret;
1021}
1022
1023static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
1024 struct file *out, loff_t *ppos,
1025 size_t len, unsigned int flags)
1026{
1027 ssize_t ret;
1028
1029 ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
1030 if (ret > 0)
1031 *ppos += ret;
1032
1033 return ret;
1034}
1035
884/** 1036/**
885 * generic_splice_sendpage - splice data from a pipe to a socket 1037 * generic_splice_sendpage - splice data from a pipe to a socket
886 * @pipe: pipe to splice from 1038 * @pipe: pipe to splice from
@@ -908,11 +1060,10 @@ EXPORT_SYMBOL(generic_splice_sendpage);
908static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, 1060static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
909 loff_t *ppos, size_t len, unsigned int flags) 1061 loff_t *ppos, size_t len, unsigned int flags)
910{ 1062{
1063 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
1064 loff_t *, size_t, unsigned int);
911 int ret; 1065 int ret;
912 1066
913 if (unlikely(!out->f_op || !out->f_op->splice_write))
914 return -EINVAL;
915
916 if (unlikely(!(out->f_mode & FMODE_WRITE))) 1067 if (unlikely(!(out->f_mode & FMODE_WRITE)))
917 return -EBADF; 1068 return -EBADF;
918 1069
@@ -923,7 +1074,11 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
923 if (unlikely(ret < 0)) 1074 if (unlikely(ret < 0))
924 return ret; 1075 return ret;
925 1076
926 return out->f_op->splice_write(pipe, out, ppos, len, flags); 1077 splice_write = out->f_op->splice_write;
1078 if (!splice_write)
1079 splice_write = default_file_splice_write;
1080
1081 return splice_write(pipe, out, ppos, len, flags);
927} 1082}
928 1083
929/* 1084/*
@@ -933,11 +1088,10 @@ static long do_splice_to(struct file *in, loff_t *ppos,
933 struct pipe_inode_info *pipe, size_t len, 1088 struct pipe_inode_info *pipe, size_t len,
934 unsigned int flags) 1089 unsigned int flags)
935{ 1090{
1091 ssize_t (*splice_read)(struct file *, loff_t *,
1092 struct pipe_inode_info *, size_t, unsigned int);
936 int ret; 1093 int ret;
937 1094
938 if (unlikely(!in->f_op || !in->f_op->splice_read))
939 return -EINVAL;
940
941 if (unlikely(!(in->f_mode & FMODE_READ))) 1095 if (unlikely(!(in->f_mode & FMODE_READ)))
942 return -EBADF; 1096 return -EBADF;
943 1097
@@ -945,7 +1099,11 @@ static long do_splice_to(struct file *in, loff_t *ppos,
945 if (unlikely(ret < 0)) 1099 if (unlikely(ret < 0))
946 return ret; 1100 return ret;
947 1101
948 return in->f_op->splice_read(in, ppos, pipe, len, flags); 1102 splice_read = in->f_op->splice_read;
1103 if (!splice_read)
1104 splice_read = default_file_splice_read;
1105
1106 return splice_read(in, ppos, pipe, len, flags);
949} 1107}
950 1108
951/** 1109/**
@@ -1112,6 +1270,9 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1112 return ret; 1270 return ret;
1113} 1271}
1114 1272
1273static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1274 struct pipe_inode_info *opipe,
1275 size_t len, unsigned int flags);
1115/* 1276/*
1116 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same 1277 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1117 * location, so checking ->i_pipe is not enough to verify that this is a 1278 * location, so checking ->i_pipe is not enough to verify that this is a
@@ -1132,12 +1293,32 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1132 struct file *out, loff_t __user *off_out, 1293 struct file *out, loff_t __user *off_out,
1133 size_t len, unsigned int flags) 1294 size_t len, unsigned int flags)
1134{ 1295{
1135 struct pipe_inode_info *pipe; 1296 struct pipe_inode_info *ipipe;
1297 struct pipe_inode_info *opipe;
1136 loff_t offset, *off; 1298 loff_t offset, *off;
1137 long ret; 1299 long ret;
1138 1300
1139 pipe = pipe_info(in->f_path.dentry->d_inode); 1301 ipipe = pipe_info(in->f_path.dentry->d_inode);
1140 if (pipe) { 1302 opipe = pipe_info(out->f_path.dentry->d_inode);
1303
1304 if (ipipe && opipe) {
1305 if (off_in || off_out)
1306 return -ESPIPE;
1307
1308 if (!(in->f_mode & FMODE_READ))
1309 return -EBADF;
1310
1311 if (!(out->f_mode & FMODE_WRITE))
1312 return -EBADF;
1313
1314 /* Splicing to self would be fun, but... */
1315 if (ipipe == opipe)
1316 return -EINVAL;
1317
1318 return splice_pipe_to_pipe(ipipe, opipe, len, flags);
1319 }
1320
1321 if (ipipe) {
1141 if (off_in) 1322 if (off_in)
1142 return -ESPIPE; 1323 return -ESPIPE;
1143 if (off_out) { 1324 if (off_out) {
@@ -1149,7 +1330,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1149 } else 1330 } else
1150 off = &out->f_pos; 1331 off = &out->f_pos;
1151 1332
1152 ret = do_splice_from(pipe, out, off, len, flags); 1333 ret = do_splice_from(ipipe, out, off, len, flags);
1153 1334
1154 if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) 1335 if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
1155 ret = -EFAULT; 1336 ret = -EFAULT;
@@ -1157,8 +1338,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1157 return ret; 1338 return ret;
1158 } 1339 }
1159 1340
1160 pipe = pipe_info(out->f_path.dentry->d_inode); 1341 if (opipe) {
1161 if (pipe) {
1162 if (off_out) 1342 if (off_out)
1163 return -ESPIPE; 1343 return -ESPIPE;
1164 if (off_in) { 1344 if (off_in) {
@@ -1170,7 +1350,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1170 } else 1350 } else
1171 off = &in->f_pos; 1351 off = &in->f_pos;
1172 1352
1173 ret = do_splice_to(in, off, pipe, len, flags); 1353 ret = do_splice_to(in, off, opipe, len, flags);
1174 1354
1175 if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) 1355 if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
1176 ret = -EFAULT; 1356 ret = -EFAULT;
@@ -1511,7 +1691,7 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
1511 * Make sure there's data to read. Wait for input if we can, otherwise 1691 * Make sure there's data to read. Wait for input if we can, otherwise
1512 * return an appropriate error. 1692 * return an appropriate error.
1513 */ 1693 */
1514static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1694static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1515{ 1695{
1516 int ret; 1696 int ret;
1517 1697
@@ -1549,7 +1729,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1549 * Make sure there's writeable room. Wait for room if we can, otherwise 1729 * Make sure there's writeable room. Wait for room if we can, otherwise
1550 * return an appropriate error. 1730 * return an appropriate error.
1551 */ 1731 */
1552static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1732static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1553{ 1733{
1554 int ret; 1734 int ret;
1555 1735
@@ -1587,6 +1767,124 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1587} 1767}
1588 1768
1589/* 1769/*
1770 * Splice contents of ipipe to opipe.
1771 */
1772static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1773 struct pipe_inode_info *opipe,
1774 size_t len, unsigned int flags)
1775{
1776 struct pipe_buffer *ibuf, *obuf;
1777 int ret = 0, nbuf;
1778 bool input_wakeup = false;
1779
1780
1781retry:
1782 ret = ipipe_prep(ipipe, flags);
1783 if (ret)
1784 return ret;
1785
1786 ret = opipe_prep(opipe, flags);
1787 if (ret)
1788 return ret;
1789
1790 /*
1791 * Potential ABBA deadlock, work around it by ordering lock
1792 * grabbing by pipe info address. Otherwise two different processes
1793 * could deadlock (one doing tee from A -> B, the other from B -> A).
1794 */
1795 pipe_double_lock(ipipe, opipe);
1796
1797 do {
1798 if (!opipe->readers) {
1799 send_sig(SIGPIPE, current, 0);
1800 if (!ret)
1801 ret = -EPIPE;
1802 break;
1803 }
1804
1805 if (!ipipe->nrbufs && !ipipe->writers)
1806 break;
1807
1808 /*
1809 * Cannot make any progress, because either the input
1810 * pipe is empty or the output pipe is full.
1811 */
1812 if (!ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS) {
1813 /* Already processed some buffers, break */
1814 if (ret)
1815 break;
1816
1817 if (flags & SPLICE_F_NONBLOCK) {
1818 ret = -EAGAIN;
1819 break;
1820 }
1821
1822 /*
1823 * We raced with another reader/writer and haven't
1824 * managed to process any buffers. A zero return
1825 * value means EOF, so retry instead.
1826 */
1827 pipe_unlock(ipipe);
1828 pipe_unlock(opipe);
1829 goto retry;
1830 }
1831
1832 ibuf = ipipe->bufs + ipipe->curbuf;
1833 nbuf = (opipe->curbuf + opipe->nrbufs) % PIPE_BUFFERS;
1834 obuf = opipe->bufs + nbuf;
1835
1836 if (len >= ibuf->len) {
1837 /*
1838 * Simply move the whole buffer from ipipe to opipe
1839 */
1840 *obuf = *ibuf;
1841 ibuf->ops = NULL;
1842 opipe->nrbufs++;
1843 ipipe->curbuf = (ipipe->curbuf + 1) % PIPE_BUFFERS;
1844 ipipe->nrbufs--;
1845 input_wakeup = true;
1846 } else {
1847 /*
1848 * Get a reference to this pipe buffer,
1849 * so we can copy the contents over.
1850 */
1851 ibuf->ops->get(ipipe, ibuf);
1852 *obuf = *ibuf;
1853
1854 /*
1855 * Don't inherit the gift flag, we need to
1856 * prevent multiple steals of this page.
1857 */
1858 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1859
1860 obuf->len = len;
1861 opipe->nrbufs++;
1862 ibuf->offset += obuf->len;
1863 ibuf->len -= obuf->len;
1864 }
1865 ret += obuf->len;
1866 len -= obuf->len;
1867 } while (len);
1868
1869 pipe_unlock(ipipe);
1870 pipe_unlock(opipe);
1871
1872 /*
1873 * If we put data in the output pipe, wakeup any potential readers.
1874 */
1875 if (ret > 0) {
1876 smp_mb();
1877 if (waitqueue_active(&opipe->wait))
1878 wake_up_interruptible(&opipe->wait);
1879 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1880 }
1881 if (input_wakeup)
1882 wakeup_pipe_writers(ipipe);
1883
1884 return ret;
1885}
1886
1887/*
1590 * Link contents of ipipe to opipe. 1888 * Link contents of ipipe to opipe.
1591 */ 1889 */
1592static int link_pipe(struct pipe_inode_info *ipipe, 1890static int link_pipe(struct pipe_inode_info *ipipe,
@@ -1690,9 +1988,9 @@ static long do_tee(struct file *in, struct file *out, size_t len,
1690 * Keep going, unless we encounter an error. The ipipe/opipe 1988 * Keep going, unless we encounter an error. The ipipe/opipe
1691 * ordering doesn't really matter. 1989 * ordering doesn't really matter.
1692 */ 1990 */
1693 ret = link_ipipe_prep(ipipe, flags); 1991 ret = ipipe_prep(ipipe, flags);
1694 if (!ret) { 1992 if (!ret) {
1695 ret = link_opipe_prep(opipe, flags); 1993 ret = opipe_prep(opipe, flags);
1696 if (!ret) 1994 if (!ret)
1697 ret = link_pipe(ipipe, opipe, len, flags); 1995 ret = link_pipe(ipipe, opipe, len, flags);
1698 } 1996 }
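
The visible effect of splice_pipe_to_pipe() and the default_file_splice_read/write fallbacks is that splice(2) no longer fails with EINVAL when both ends are pipes, or when a file's filesystem provides no splice methods. A small userspace illustration, not part of the patch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int a[2], b[2];
	char buf[16] = "";
	ssize_t n;

	if (pipe(a) || pipe(b))
		return 1;

	write(a[1], "hello", 5);

	/* pipe -> pipe: previously -EINVAL, now moves the buffers directly */
	n = splice(a[0], NULL, b[1], NULL, 5, 0);
	if (n < 0) {
		perror("splice");
		return 1;
	}

	n = read(b[0], buf, sizeof(buf) - 1);
	printf("moved %zd bytes: %s\n", n, buf);
	return 0;
}
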
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b214fd672a..d30ec6f30dd 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio {
218#define bio_sectors(bio) ((bio)->bi_size >> 9) 218#define bio_sectors(bio) ((bio)->bi_size >> 9)
219#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) 219#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
220 220
221static inline unsigned int bio_cur_sectors(struct bio *bio) 221static inline unsigned int bio_cur_bytes(struct bio *bio)
222{ 222{
223 if (bio->bi_vcnt) 223 if (bio->bi_vcnt)
224 return bio_iovec(bio)->bv_len >> 9; 224 return bio_iovec(bio)->bv_len;
225 else /* dataless requests such as discard */ 225 else /* dataless requests such as discard */
226 return bio->bi_size >> 9; 226 return bio->bi_size;
227} 227}
228 228
229static inline void *bio_data(struct bio *bio) 229static inline void *bio_data(struct bio *bio)
@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
506} 506}
507 507
508/* 508/*
509 * BIO list managment for use by remapping drivers (e.g. DM or MD). 509 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
510 * 510 *
511 * A bio_list anchors a singly-linked list of bios chained through the bi_next 511 * A bio_list anchors a singly-linked list of bios chained through the bi_next
512 * member of the bio. The bio_list also caches the last list member to allow 512 * member of the bio. The bio_list also caches the last list member to allow
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af..56ce53fce72 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request {
166 enum rq_cmd_type_bits cmd_type; 166 enum rq_cmd_type_bits cmd_type;
167 unsigned long atomic_flags; 167 unsigned long atomic_flags;
168 168
169 /* Maintain bio traversal state for part by part I/O submission. 169 /* the following two fields are internal, NEVER access directly */
170 * hard_* are block layer internals, no driver should touch them! 170 sector_t __sector; /* sector cursor */
171 */ 171 unsigned int __data_len; /* total data len */
172
173 sector_t sector; /* next sector to submit */
174 sector_t hard_sector; /* next sector to complete */
175 unsigned long nr_sectors; /* no. of sectors left to submit */
176 unsigned long hard_nr_sectors; /* no. of sectors left to complete */
177 /* no. of sectors left to submit in the current segment */
178 unsigned int current_nr_sectors;
179
180 /* no. of sectors left to complete in the current segment */
181 unsigned int hard_cur_sectors;
182 172
183 struct bio *bio; 173 struct bio *bio;
184 struct bio *biotail; 174 struct bio *biotail;
@@ -211,8 +201,8 @@ struct request {
211 201
212 unsigned short ioprio; 202 unsigned short ioprio;
213 203
214 void *special; 204 void *special; /* opaque pointer available for LLD use */
215 char *buffer; 205 char *buffer; /* kaddr of the current segment if available */
216 206
217 int tag; 207 int tag;
218 int errors; 208 int errors;
@@ -226,10 +216,9 @@ struct request {
226 unsigned char __cmd[BLK_MAX_CDB]; 216 unsigned char __cmd[BLK_MAX_CDB];
227 unsigned char *cmd; 217 unsigned char *cmd;
228 218
229 unsigned int data_len;
230 unsigned int extra_len; /* length of alignment and padding */ 219 unsigned int extra_len; /* length of alignment and padding */
231 unsigned int sense_len; 220 unsigned int sense_len;
232 void *data; 221 unsigned int resid_len; /* residual count */
233 void *sense; 222 void *sense;
234 223
235 unsigned long deadline; 224 unsigned long deadline;
@@ -415,7 +404,7 @@ struct request_queue
415 struct list_head tag_busy_list; 404 struct list_head tag_busy_list;
416 405
417 unsigned int nr_sorted; 406 unsigned int nr_sorted;
418 unsigned int in_flight; 407 unsigned int in_flight[2];
419 408
420 unsigned int rq_timeout; 409 unsigned int rq_timeout;
421 struct timer_list timeout; 410 struct timer_list timeout;
@@ -522,6 +511,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
522 __clear_bit(flag, &q->queue_flags); 511 __clear_bit(flag, &q->queue_flags);
523} 512}
524 513
514static inline int queue_in_flight(struct request_queue *q)
515{
516 return q->in_flight[0] + q->in_flight[1];
517}
518
525static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) 519static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
526{ 520{
527 WARN_ON_ONCE(!queue_is_locked(q)); 521 WARN_ON_ONCE(!queue_is_locked(q));
@@ -752,6 +746,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
752extern void blk_put_request(struct request *); 746extern void blk_put_request(struct request *);
753extern void __blk_put_request(struct request_queue *, struct request *); 747extern void __blk_put_request(struct request_queue *, struct request *);
754extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 748extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
749extern struct request *blk_make_request(struct request_queue *, struct bio *,
750 gfp_t);
755extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 751extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
756extern void blk_requeue_request(struct request_queue *, struct request *); 752extern void blk_requeue_request(struct request_queue *, struct request *);
757extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); 753extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
@@ -768,12 +764,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
768 struct scsi_ioctl_command __user *); 764 struct scsi_ioctl_command __user *);
769 765
770/* 766/*
771 * Temporary export, until SCSI gets fixed up.
772 */
773extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
774 struct bio *bio);
775
776/*
777 * A queue has just exitted congestion. Note this in the global counter of 767 * A queue has just exitted congestion. Note this in the global counter of
778 * congested queues, and wake up anyone who was waiting for requests to be 768 * congested queues, and wake up anyone who was waiting for requests to be
779 * put back. 769 * put back.
@@ -798,7 +788,6 @@ extern void blk_sync_queue(struct request_queue *q);
798extern void __blk_stop_queue(struct request_queue *q); 788extern void __blk_stop_queue(struct request_queue *q);
799extern void __blk_run_queue(struct request_queue *); 789extern void __blk_run_queue(struct request_queue *);
800extern void blk_run_queue(struct request_queue *); 790extern void blk_run_queue(struct request_queue *);
801extern void blk_start_queueing(struct request_queue *);
802extern int blk_rq_map_user(struct request_queue *, struct request *, 791extern int blk_rq_map_user(struct request_queue *, struct request *,
803 struct rq_map_data *, void __user *, unsigned long, 792 struct rq_map_data *, void __user *, unsigned long,
804 gfp_t); 793 gfp_t);
@@ -831,41 +820,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
831 blk_run_backing_dev(mapping->backing_dev_info, NULL); 820 blk_run_backing_dev(mapping->backing_dev_info, NULL);
832} 821}
833 822
834extern void blkdev_dequeue_request(struct request *req); 823/*
824 * blk_rq_pos() : the current sector
825 * blk_rq_bytes() : bytes left in the entire request
826 * blk_rq_cur_bytes() : bytes left in the current segment
827 * blk_rq_sectors() : sectors left in the entire request
828 * blk_rq_cur_sectors() : sectors left in the current segment
829 */
830static inline sector_t blk_rq_pos(const struct request *rq)
831{
832 return rq->__sector;
833}
834
835static inline unsigned int blk_rq_bytes(const struct request *rq)
836{
837 return rq->__data_len;
838}
839
840static inline int blk_rq_cur_bytes(const struct request *rq)
841{
842 return rq->bio ? bio_cur_bytes(rq->bio) : 0;
843}
844
845static inline unsigned int blk_rq_sectors(const struct request *rq)
846{
847 return blk_rq_bytes(rq) >> 9;
848}
849
850static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
851{
852 return blk_rq_cur_bytes(rq) >> 9;
853}
854
855/*
856 * Request issue related functions.
857 */
858extern struct request *blk_peek_request(struct request_queue *q);
859extern void blk_start_request(struct request *rq);
860extern struct request *blk_fetch_request(struct request_queue *q);
835 861
836/* 862/*
837 * blk_end_request() and friends. 863 * Request completion related functions.
838 * __blk_end_request() and end_request() must be called with 864 *
839 * the request queue spinlock acquired. 865 * blk_update_request() completes given number of bytes and updates
866 * the request without completing it.
867 *
868 * blk_end_request() and friends. __blk_end_request() must be called
869 * with the request queue spinlock acquired.
840 * 870 *
841 * Several drivers define their own end_request and call 871 * Several drivers define their own end_request and call
842 * blk_end_request() for parts of the original function. 872 * blk_end_request() for parts of the original function.
843 * This prevents code duplication in drivers. 873 * This prevents code duplication in drivers.
844 */ 874 */
845extern int blk_end_request(struct request *rq, int error, 875extern bool blk_update_request(struct request *rq, int error,
846 unsigned int nr_bytes); 876 unsigned int nr_bytes);
847extern int __blk_end_request(struct request *rq, int error, 877extern bool blk_end_request(struct request *rq, int error,
848 unsigned int nr_bytes); 878 unsigned int nr_bytes);
849extern int blk_end_bidi_request(struct request *rq, int error, 879extern void blk_end_request_all(struct request *rq, int error);
850 unsigned int nr_bytes, unsigned int bidi_bytes); 880extern bool blk_end_request_cur(struct request *rq, int error);
851extern void end_request(struct request *, int); 881extern bool __blk_end_request(struct request *rq, int error,
852extern int blk_end_request_callback(struct request *rq, int error, 882 unsigned int nr_bytes);
853 unsigned int nr_bytes, 883extern void __blk_end_request_all(struct request *rq, int error);
854 int (drv_callback)(struct request *)); 884extern bool __blk_end_request_cur(struct request *rq, int error);
885
855extern void blk_complete_request(struct request *); 886extern void blk_complete_request(struct request *);
856extern void __blk_complete_request(struct request *); 887extern void __blk_complete_request(struct request *);
857extern void blk_abort_request(struct request *); 888extern void blk_abort_request(struct request *);
858extern void blk_abort_queue(struct request_queue *); 889extern void blk_abort_queue(struct request_queue *);
859extern void blk_update_request(struct request *rq, int error,
860 unsigned int nr_bytes);
861
862/*
863 * blk_end_request() takes bytes instead of sectors as a complete size.
864 * blk_rq_bytes() returns bytes left to complete in the entire request.
865 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
866 */
867extern unsigned int blk_rq_bytes(struct request *rq);
868extern unsigned int blk_rq_cur_bytes(struct request *rq);
869 890
870/* 891/*
871 * Access functions for manipulating queue properties 892 * Access functions for manipulating queue properties
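
Taken together, the new helpers reduce a simple single-segment driver to the following shape, modeled on the converted drivers earlier in the diffstat (hd, mg_disk, and friends); example_transfer() stands in for the actual hardware I/O:

static void example_do_request(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}

		/*
		 * Transfer and complete one segment at a time.
		 * __blk_end_request_cur() returns false once the whole
		 * request is finished; blk_rq_pos() and rq->buffer are
		 * advanced by the block layer after each completion.
		 */
		do {
			if (example_transfer(blk_rq_pos(rq),
					     blk_rq_cur_sectors(rq),
					     rq->buffer,
					     rq_data_dir(rq)) < 0) {
				__blk_end_request_all(rq, -EIO);
				break;
			}
		} while (__blk_end_request_cur(rq, 0));
	}
}
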
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769f62b..1cb3372e65d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
103extern void elv_merge_requests(struct request_queue *, struct request *, 103extern void elv_merge_requests(struct request_queue *, struct request *,
104 struct request *); 104 struct request *);
105extern void elv_merged_request(struct request_queue *, struct request *, int); 105extern void elv_merged_request(struct request_queue *, struct request *, int);
106extern void elv_dequeue_request(struct request_queue *, struct request *);
107extern void elv_requeue_request(struct request_queue *, struct request *); 106extern void elv_requeue_request(struct request_queue *, struct request *);
108extern int elv_queue_empty(struct request_queue *); 107extern int elv_queue_empty(struct request_queue *);
109extern struct request *elv_next_request(struct request_queue *q);
110extern struct request *elv_former_request(struct request_queue *, struct request *); 108extern struct request *elv_former_request(struct request_queue *, struct request *);
111extern struct request *elv_latter_request(struct request_queue *, struct request *); 109extern struct request *elv_latter_request(struct request_queue *, struct request *);
112extern int elv_register_queue(struct request_queue *q); 110extern int elv_register_queue(struct request_queue *q);
@@ -171,7 +169,7 @@ enum {
171 ELV_MQUEUE_MUST, 169 ELV_MQUEUE_MUST,
172}; 170};
173 171
174#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) 172#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
175#define rb_entry_rq(node) rb_entry((node), struct request, rb_node) 173#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
176 174
177/* 175/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b534e527e0..83d6b439724 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2205,6 +2205,8 @@ extern int generic_segment_checks(const struct iovec *iov,
2205/* fs/splice.c */ 2205/* fs/splice.c */
2206extern ssize_t generic_file_splice_read(struct file *, loff_t *, 2206extern ssize_t generic_file_splice_read(struct file *, loff_t *,
2207 struct pipe_inode_info *, size_t, unsigned int); 2207 struct pipe_inode_info *, size_t, unsigned int);
2208extern ssize_t default_file_splice_read(struct file *, loff_t *,
2209 struct pipe_inode_info *, size_t, unsigned int);
2208extern ssize_t generic_file_splice_write(struct pipe_inode_info *, 2210extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
2209 struct file *, loff_t *, size_t, unsigned int); 2211 struct file *, loff_t *, size_t, unsigned int);
2210extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2212extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 9fed365a598..867cb68d846 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -26,6 +26,9 @@
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/mutex.h> 27#include <asm/mutex.h>
28 28
29/* for request_sense */
30#include <linux/cdrom.h>
31
29#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) 32#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
30# define SUPPORT_VLB_SYNC 0 33# define SUPPORT_VLB_SYNC 0
31#else 34#else
@@ -324,7 +327,6 @@ struct ide_cmd {
324 unsigned int cursg_ofs; 327 unsigned int cursg_ofs;
325 328
326 struct request *rq; /* copy of request */ 329 struct request *rq; /* copy of request */
327 void *special; /* valid_t generally */
328}; 330};
329 331
330/* ATAPI packet command flags */ 332/* ATAPI packet command flags */
@@ -360,11 +362,7 @@ struct ide_atapi_pc {
360 362
361 /* data buffer */ 363 /* data buffer */
362 u8 *buf; 364 u8 *buf;
363 /* current buffer position */
364 u8 *cur_pos;
365 int buf_size; 365 int buf_size;
366 /* missing/available data on the current buffer */
367 int b_count;
368 366
369 /* the corresponding request */ 367 /* the corresponding request */
370 struct request *rq; 368 struct request *rq;
@@ -377,10 +375,6 @@ struct ide_atapi_pc {
377 */ 375 */
378 u8 pc_buf[IDE_PC_BUFFER_SIZE]; 376 u8 pc_buf[IDE_PC_BUFFER_SIZE];
379 377
380 /* idetape only */
381 struct idetape_bh *bh;
382 char *b_data;
383
384 unsigned long timeout; 378 unsigned long timeout;
385}; 379};
386 380
@@ -593,16 +587,16 @@ struct ide_drive_s {
 	/* callback for packet commands */
 	int  (*pc_callback)(struct ide_drive_s *, int);
 
-	void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *);
-	int  (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *,
-			      unsigned int, int);
-
 	ide_startstop_t (*irq_handler)(struct ide_drive_s *);
 
 	unsigned long atapi_flags;
 
 	struct ide_atapi_pc request_sense_pc;
-	struct request request_sense_rq;
+
+	/* current sense rq and buffer */
+	bool sense_rq_armed;
+	struct request sense_rq;
+	struct request_sense sense_data;
 };
 
 typedef struct ide_drive_s ide_drive_t;
@@ -1174,7 +1168,10 @@ int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
 int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
 int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
 void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *, struct gendisk *);
+void ide_retry_pc(ide_drive_t *drive);
+
+void ide_prep_sense(ide_drive_t *drive, struct request *rq);
+int ide_queue_sense_rq(ide_drive_t *drive, void *special);
 
 int ide_cd_expiry(ide_drive_t *);
 
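
The sense machinery moves from per-command buffers to a single per-drive sense_rq/sense_data pair: ide_prep_sense() arms drive->sense_rq before a packet command is issued, ide_queue_sense_rq() inserts it at the head of the queue when the device reports CHECK CONDITION, and ide_retry_pc() now takes only the drive. A rough usage sketch inferred from the prototypes above (the function name and surrounding flow are assumptions, not code from this series):

	static ide_startstop_t example_issue_pc(ide_drive_t *drive, struct request *rq)
	{
		/* make sure a sense request is armed before the command starts */
		ide_prep_sense(drive, rq);

		/* ... build the taskfile and send the packet command ... */

		return ide_started;
	}
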
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40725447f5e..66c194e2d9b 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -56,8 +56,7 @@ struct loop_device {
 	gfp_t old_gfp_mask;
 
 	spinlock_t lo_lock;
-	struct bio *lo_bio;
-	struct bio *lo_biotail;
+	struct bio_list lo_bio_list;
 	int lo_state;
 	struct mutex lo_ctl_mutex;
 	struct task_struct *lo_thread;
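
The hand-rolled lo_bio/lo_biotail singly linked list is replaced by the generic struct bio_list helpers from <linux/bio.h>. A sketch of the resulting queue/dequeue pattern under lo_lock (illustrative; the loop driver wraps this in its own helpers):

	/* producer: queue an incoming bio for the loop thread */
	spin_lock_irq(&lo->lo_lock);
	bio_list_add(&lo->lo_bio_list, bio);
	spin_unlock_irq(&lo->lo_lock);

	/* consumer (loop thread): take the oldest queued bio */
	spin_lock_irq(&lo->lo_lock);
	bio = bio_list_pop(&lo->lo_bio_list);
	spin_unlock_irq(&lo->lo_lock);
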
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index 1f76b1ebf62..00000000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * include/linux/mg_disk.c
- *
- * Support for the mGine m[g]flash IO mode.
- * Based on legacy hd.c
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-#include <linux/blkdev.h>
-#include <linux/ata.h>
-
-/* name for block device */
-#define MG_DISK_NAME "mgd"
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-#define MG_DISK_MAJ 0
-#define MG_DISK_MAX_PART 16
-#define MG_SECTOR_SIZE 512
-#define MG_MAX_SECTS 256
-
-/* Register offsets */
-#define MG_BUFF_OFFSET 0x8000
-#define MG_STORAGE_BUFFER_SIZE 0x200
-#define MG_REG_OFFSET 0xC000
-#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
-#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
-#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
-#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
-#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
-#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
-#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
-#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
-#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
-#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
-#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
-
-/* "Drive Select/Head Register" bit values */
-#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
-#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)
-
-
-/* "Device Control Register" bit values */
-#define MG_REG_CTRL_INTR_ENABLE 0x0
-#define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
-#define MG_REG_CTRL_RESET (0x1<<2)
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
-#define MG_REG_CTRL_DPD_DISABLE 0x0
-#define MG_REG_CTRL_DPD_ENABLE (0x1<<6)
-
-/* Status register bit */
-/* error bit in status register */
-#define MG_REG_STATUS_BIT_ERROR 0x01
-/* corrected error in status register */
-#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
-/* data request bit in status register */
-#define MG_REG_STATUS_BIT_DATA_REQ 0x08
-/* DSC - Drive Seek Complete */
-#define MG_REG_STATUS_BIT_SEEK_DONE 0x10
-/* DWF - Drive Write Fault */
-#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
-#define MG_REG_STATUS_BIT_READY 0x40
-#define MG_REG_STATUS_BIT_BUSY 0x80
-
-/* handy status */
-#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
-#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
-				(MG_REG_STATUS_BIT_BUSY | \
-				 MG_REG_STATUS_BIT_WRITE_FAULT | \
-				 MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
-
-/* Error register */
-#define MG_REG_ERR_AMNF 0x01
-#define MG_REG_ERR_ABRT 0x04
-#define MG_REG_ERR_IDNF 0x10
-#define MG_REG_ERR_UNC 0x40
-#define MG_REG_ERR_BBK 0x80
-
-/* error code for others */
-#define MG_ERR_NONE 0
-#define MG_ERR_TIMEOUT 0x100
-#define MG_ERR_INIT_STAT 0x101
-#define MG_ERR_TRANSLATION 0x102
-#define MG_ERR_CTRL_RST 0x103
-#define MG_ERR_INV_STAT 0x104
-#define MG_ERR_RSTOUT 0x105
-
-#define MG_MAX_ERRORS 6 /* Max read/write errors */
-
-/* command */
-#define MG_CMD_RD 0x20
-#define MG_CMD_WR 0x30
-#define MG_CMD_SLEEP 0x99
-#define MG_CMD_WAKEUP 0xC3
-#define MG_CMD_ID 0xEC
-#define MG_CMD_WR_CONF 0x3C
-#define MG_CMD_RD_CONF 0x40
-
-/* operation mode */
-#define MG_OP_CASCADE (1 << 0)
-#define MG_OP_CASCADE_SYNC_RD (1 << 1)
-#define MG_OP_CASCADE_SYNC_WR (1 << 2)
-#define MG_OP_INTERLEAVE (1 << 3)
-
-/* synchronous */
-#define MG_BURST_LAT_4 (3 << 4)
-#define MG_BURST_LAT_5 (4 << 4)
-#define MG_BURST_LAT_6 (5 << 4)
-#define MG_BURST_LAT_7 (6 << 4)
-#define MG_BURST_LAT_8 (7 << 4)
-#define MG_BURST_LEN_4 (1 << 1)
-#define MG_BURST_LEN_8 (2 << 1)
-#define MG_BURST_LEN_16 (3 << 1)
-#define MG_BURST_LEN_32 (4 << 1)
-#define MG_BURST_LEN_CONT (0 << 1)
-
-/* timeout value (unit: ms) */
-#define MG_TMAX_CONF_TO_CMD 1
-#define MG_TMAX_WAIT_RD_DRQ 10
-#define MG_TMAX_WAIT_WR_DRQ 500
-#define MG_TMAX_RST_TO_BUSY 10
-#define MG_TMAX_HDRST_TO_RDY 500
-#define MG_TMAX_SWRST_TO_RDY 500
-#define MG_TMAX_RSTOUT 3000
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV (1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV (1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
-
-#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
-
-/* names of GPIO resource */
-#define MG_RST_PIN "mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN "mg_rstout"
-
-/* private driver data */
-struct mg_drv_data {
-	/* disk resource */
-	u32 use_polling;
-
-	/* device attribution */
-	u32 dev_attr;
-
-	/* internally used */
-	struct mg_host *host;
-};
-
-/* main structure for mflash driver */
-struct mg_host {
-	struct device *dev;
-
-	struct request_queue *breq;
-	spinlock_t lock;
-	struct gendisk *gd;
-
-	struct timer_list timer;
-	void (*mg_do_intr) (struct mg_host *);
-
-	u16 id[ATA_ID_WORDS];
-
-	u16 cyls;
-	u16 heads;
-	u16 sectors;
-	u32 n_sectors;
-	u32 nres_sectors;
-
-	void __iomem *dev_base;
-	unsigned int irq;
-	unsigned int rst;
-	unsigned int rstout;
-
-	u32 major;
-	u32 error;
-};
-
-/*
- * Debugging macro and defines
- */
-#undef DO_MG_DEBUG
-#ifdef DO_MG_DEBUG
-# define MG_DBG(fmt, args...) \
-	printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
-#else /* CONFIG_MG_DEBUG */
-# define MG_DBG(fmt, args...) do { } while (0)
-#endif /* CONFIG_MG_DEBUG */
-
-#endif
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index c8f038554e8..b43a9e03905 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 
 #endif
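
generic_pipe_buf_release() is now exported alongside the other generic_pipe_buf_* helpers, so pipe_buf_operations that manage plain page-backed buffers (as the copy-based splice path above does) can use the stock release hook instead of open-coding it. A hedged sketch of such an ops table (field layout assumed to match the 2.6.30-era struct pipe_buf_operations):

	static const struct pipe_buf_operations example_pipe_buf_ops = {
		.can_merge = 0,
		.map       = generic_pipe_buf_map,
		.unmap     = generic_pipe_buf_unmap,
		.confirm   = generic_pipe_buf_confirm,
		.release   = generic_pipe_buf_release,  /* drops the page reference */
		.steal     = generic_pipe_buf_steal,
		.get       = generic_pipe_buf_get,
	};
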
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 5f3faa9d15a..18e7c7c0cae 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -11,8 +11,7 @@
 #include <linux/pipe_fs_i.h>
 
 /*
- * splice is tied to pipes as a transport (at least for now), so we'll just
- * add the splice flags here.
+ * Flags passed in from splice/tee/vmsplice
  */
 #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
 #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 94c56d29869..4dbcbc1c348 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,7 @@
 #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
 #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
+#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
 
 struct virtio_blk_config
 {
@@ -55,6 +56,13 @@ struct virtio_blk_outhdr
 	__u64 sector;
 };
 
+struct virtio_scsi_inhdr {
+	__u32 errors;
+	__u32 data_len;
+	__u32 sense_len;
+	__u32 residual;
+};
+
 /* And this is the final byte of the write scatter-gather list. */
 #define VIRTIO_BLK_S_OK 0
 #define VIRTIO_BLK_S_IOERR 1
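
VIRTIO_BLK_F_SCSI advertises SCSI command passthrough, and struct virtio_scsi_inhdr is the device-writable status block for such a request, reporting errors, data_len, sense_len and residual back to the driver. The expected buffer ordering, as an assumption drawn from this header (drivers/block/virtio_blk.c in this series is the authoritative reference):

	/*
	 * driver -> device: struct virtio_blk_outhdr, the CDB, write payload (if any)
	 * device -> driver: read payload (if any), sense buffer,
	 *                   struct virtio_scsi_inhdr, final status byte
	 */
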
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 43b50d36925..3878d1dc7f5 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -270,7 +270,7 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
 
 static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
 {
-	return scmd->request->sector;
+	return blk_rq_pos(scmd->request);
 }
 
 static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 921ef5d1f0b..5708a14bee5 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -642,12 +642,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 
 	if (blk_pc_request(rq)) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
-		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
-				rq->cmd_len, rq->cmd);
+		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
+				what, rq->errors, rq->cmd_len, rq->cmd);
 	} else {
 		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
-				rw, what, rq->errors, 0, NULL);
+		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
+				what, rq->errors, 0, NULL);
 	}
 }
 
@@ -854,11 +854,11 @@ void blk_add_driver_data(struct request_queue *q,
 		return;
 
 	if (blk_pc_request(rq))
-		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
-				rq->errors, len, data);
+		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
+				BLK_TA_DRV_DATA, rq->errors, len, data);
 	else
-		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
-				0, BLK_TA_DRV_DATA, rq->errors, len, data);
+		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
+				BLK_TA_DRV_DATA, rq->errors, len, data);
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
 