-rw-r--r--  Documentation/ABI/testing/sysfs-block | 59
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci-devices-cciss | 33
-rw-r--r--  Documentation/block/biodoc.txt | 2
-rw-r--r--  arch/arm/plat-omap/mailbox.c | 63
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 2
-rw-r--r--  arch/um/drivers/ubd_kern.c | 36
-rw-r--r--  block/Kconfig | 11
-rw-r--r--  block/as-iosched.c | 24
-rw-r--r--  block/blk-barrier.c | 27
-rw-r--r--  block/blk-core.c | 848
-rw-r--r--  block/blk-exec.c | 1
-rw-r--r--  block/blk-integrity.c | 2
-rw-r--r--  block/blk-ioc.c | 12
-rw-r--r--  block/blk-map.c | 25
-rw-r--r--  block/blk-merge.c | 71
-rw-r--r--  block/blk-settings.c | 269
-rw-r--r--  block/blk-sysfs.c | 62
-rw-r--r--  block/blk-tag.c | 17
-rw-r--r--  block/blk-timeout.c | 22
-rw-r--r--  block/blk.h | 51
-rw-r--r--  block/bsg.c | 8
-rw-r--r--  block/cfq-iosched.c | 38
-rw-r--r--  block/compat_ioctl.c | 4
-rw-r--r--  block/deadline-iosched.c | 2
-rw-r--r--  block/elevator.c | 185
-rw-r--r--  block/genhd.c | 11
-rw-r--r--  block/ioctl.c | 12
-rw-r--r--  block/scsi_ioctl.c | 13
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/block/DAC960.c | 10
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/amiflop.c | 54
-rw-r--r--  drivers/block/ataflop.c | 66
-rw-r--r--  drivers/block/brd.c | 7
-rw-r--r--  drivers/block/cciss.c | 927
-rw-r--r--  drivers/block/cciss.h | 34
-rw-r--r--  drivers/block/cciss_cmd.h | 2
-rw-r--r--  drivers/block/cciss_scsi.c | 109
-rw-r--r--  drivers/block/cpqarray.c | 20
-rw-r--r--  drivers/block/floppy.c | 85
-rw-r--r--  drivers/block/hd.c | 106
-rw-r--r--  drivers/block/loop.c | 37
-rw-r--r--  drivers/block/mg_disk.c | 537
-rw-r--r--  drivers/block/nbd.c | 23
-rw-r--r--  drivers/block/paride/pcd.c | 29
-rw-r--r--  drivers/block/paride/pd.c | 22
-rw-r--r--  drivers/block/paride/pf.c | 47
-rw-r--r--  drivers/block/pktcdvd.c | 8
-rw-r--r--  drivers/block/ps3disk.c | 24
-rw-r--r--  drivers/block/sunvdc.c | 14
-rw-r--r--  drivers/block/swim.c | 48
-rw-r--r--  drivers/block/swim3.c | 107
-rw-r--r--  drivers/block/sx8.c | 17
-rw-r--r--  drivers/block/ub.c | 54
-rw-r--r--  drivers/block/viodasd.c | 12
-rw-r--r--  drivers/block/virtio_blk.c | 110
-rw-r--r--  drivers/block/xd.c | 41
-rw-r--r--  drivers/block/xen-blkfront.c | 34
-rw-r--r--  drivers/block/xsysace.c | 46
-rw-r--r--  drivers/block/z2ram.c | 19
-rw-r--r--  drivers/cdrom/cdrom.c | 4
-rw-r--r--  drivers/cdrom/gdrom.c | 36
-rw-r--r--  drivers/cdrom/viocd.c | 33
-rw-r--r--  drivers/char/raw.c | 2
-rw-r--r--  drivers/ide/ide-atapi.c | 21
-rw-r--r--  drivers/ide/ide-cd.c | 64
-rw-r--r--  drivers/ide/ide-disk.c | 10
-rw-r--r--  drivers/ide/ide-dma.c | 2
-rw-r--r--  drivers/ide/ide-floppy.c | 10
-rw-r--r--  drivers/ide/ide-io.c | 43
-rw-r--r--  drivers/ide/ide-lib.c | 2
-rw-r--r--  drivers/ide/ide-tape.c | 12
-rw-r--r--  drivers/ide/ide-taskfile.c | 2
-rw-r--r--  drivers/ide/pdc202xx_old.c | 2
-rw-r--r--  drivers/ide/tc86c001.c | 2
-rw-r--r--  drivers/ide/tx4939ide.c | 2
-rw-r--r--  drivers/md/bitmap.c | 4
-rw-r--r--  drivers/md/dm-exception-store.c | 2
-rw-r--r--  drivers/md/dm-log.c | 3
-rw-r--r--  drivers/md/dm-snap-persistent.c | 2
-rw-r--r--  drivers/md/dm-table.c | 38
-rw-r--r--  drivers/md/linear.c | 2
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/multipath.c | 4
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/md/raid1.c | 4
-rw-r--r--  drivers/md/raid10.c | 8
-rw-r--r--  drivers/md/raid5.c | 4
-rw-r--r--  drivers/memstick/core/mspro_block.c | 19
-rw-r--r--  drivers/message/fusion/mptsas.c | 22
-rw-r--r--  drivers/message/i2o/i2o_block.c | 43
-rw-r--r--  drivers/mmc/card/block.c | 12
-rw-r--r--  drivers/mmc/card/queue.c | 11
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 43
-rw-r--r--  drivers/s390/block/dasd.c | 37
-rw-r--r--  drivers/s390/block/dasd_diag.c | 5
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 6
-rw-r--r--  drivers/s390/block/dasd_fba.c | 7
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 2
-rw-r--r--  drivers/s390/char/tape_34xx.c | 2
-rw-r--r--  drivers/s390/char/tape_3590.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 26
-rw-r--r--  drivers/sbus/char/jsflash.c | 26
-rw-r--r--  drivers/scsi/eata.c | 24
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 16
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 49
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 22
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 23
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 72
-rw-r--r--  drivers/scsi/scsi_lib.c | 87
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/sd.c | 26
-rw-r--r--  drivers/scsi/sd_dif.c | 2
-rw-r--r--  drivers/scsi/sg.c | 17
-rw-r--r--  drivers/scsi/sr.c | 17
-rw-r--r--  drivers/scsi/st.c | 6
-rw-r--r--  drivers/scsi/u14-34f.c | 22
-rw-r--r--  drivers/usb/storage/scsiglue.c | 4
-rw-r--r--  fs/bio.c | 26
-rw-r--r--  fs/block_dev.c | 6
-rw-r--r--  fs/buffer.c | 6
-rw-r--r--  fs/coda/file.c | 9
-rw-r--r--  fs/direct-io.c | 2
-rw-r--r--  fs/exofs/osd.c | 4
-rw-r--r--  fs/ext3/super.c | 4
-rw-r--r--  fs/ext4/super.c | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 4
-rw-r--r--  fs/gfs2/rgrp.c | 2
-rw-r--r--  fs/nilfs2/the_nilfs.c | 2
-rw-r--r--  fs/ntfs/super.c | 6
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 2
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/partitions/check.c | 10
-rw-r--r--  fs/partitions/ibm.c | 2
-rw-r--r--  fs/partitions/msdos.c | 4
-rw-r--r--  fs/pipe.c | 14
-rw-r--r--  fs/read_write.c | 7
-rw-r--r--  fs/splice.c | 338
-rw-r--r--  fs/udf/super.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 2
-rw-r--r--  include/linux/bio.h | 10
-rw-r--r--  include/linux/blkdev.h | 245
-rw-r--r--  include/linux/device-mapper.h | 2
-rw-r--r--  include/linux/elevator.h | 4
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/genhd.h | 1
-rw-r--r--  include/linux/iocontext.h | 6
-rw-r--r--  include/linux/loop.h | 3
-rw-r--r--  include/linux/mg_disk.h | 206
-rw-r--r--  include/linux/pipe_fs_i.h | 1
-rw-r--r--  include/linux/splice.h | 3
-rw-r--r--  include/linux/virtio_blk.h | 12
-rw-r--r--  include/scsi/scsi_cmnd.h | 2
-rw-r--r--  include/trace/events/block.h | 29
-rw-r--r--  kernel/trace/blktrace.c | 21
-rw-r--r--  mm/bounce.c | 4
158 files changed, 3790 insertions(+), 2760 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 44f52a4f5903..cbbd3e069945 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -60,3 +60,62 @@ Description:
60 Indicates whether the block layer should automatically 60 Indicates whether the block layer should automatically
61 generate checksums for write requests bound for 61 generate checksums for write requests bound for
62 devices that support receiving integrity metadata. 62 devices that support receiving integrity metadata.
63
64What: /sys/block/<disk>/alignment_offset
65Date: April 2009
66Contact: Martin K. Petersen <martin.petersen@oracle.com>
67Description:
68 Storage devices may report a physical block size that is
69 bigger than the logical block size (for instance a drive
70 with 4KB physical sectors exposing 512-byte logical
71 blocks to the operating system). This parameter
72 indicates how many bytes the beginning of the device is
73 offset from the disk's natural alignment.
74
75What: /sys/block/<disk>/<partition>/alignment_offset
76Date: April 2009
77Contact: Martin K. Petersen <martin.petersen@oracle.com>
78Description:
79 Storage devices may report a physical block size that is
80 bigger than the logical block size (for instance a drive
81 with 4KB physical sectors exposing 512-byte logical
82 blocks to the operating system). This parameter
83 indicates how many bytes the beginning of the partition
84 is offset from the disk's natural alignment.
85
86What: /sys/block/<disk>/queue/logical_block_size
87Date: May 2009
88Contact: Martin K. Petersen <martin.petersen@oracle.com>
89Description:
90 This is the smallest unit the storage device can
91 address. It is typically 512 bytes.
92
93What: /sys/block/<disk>/queue/physical_block_size
94Date: May 2009
95Contact: Martin K. Petersen <martin.petersen@oracle.com>
96Description:
97 This is the smallest unit the storage device can write
98 without resorting to read-modify-write operation. It is
99 usually the same as the logical block size but may be
100 bigger. One example is SATA drives with 4KB sectors
101 that expose a 512-byte logical block size to the
102 operating system.
103
104What: /sys/block/<disk>/queue/minimum_io_size
105Date: April 2009
106Contact: Martin K. Petersen <martin.petersen@oracle.com>
107Description:
108 Storage devices may report a preferred minimum I/O size,
109 which is the smallest request the device can perform
110 without incurring a read-modify-write penalty. For disk
111 drives this is often the physical block size. For RAID
112 arrays it is often the stripe chunk size.
113
114What: /sys/block/<disk>/queue/optimal_io_size
115Date: April 2009
116Contact: Martin K. Petersen <martin.petersen@oracle.com>
117Description:
118 Storage devices may report an optimal I/O size, which is
119 the device's preferred unit of receiving I/O. This is
120 rarely reported for disk drives. For RAID devices it is
121 usually the stripe width or the internal block size.
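
The queue attributes documented above are plain-text sysfs files, so the new limits can be picked up from userspace without any ioctls. A minimal userspace sketch (assuming a device named "sda"; read_queue_limit() is just an illustrative helper, not part of this patch):

#include <stdio.h>

/* Read one queue limit attribute for a given disk. The path layout follows
 * the ABI entries documented above; returns -1 if the attribute is missing. */
static long read_queue_limit(const char *disk, const char *attr)
{
        char path[256];
        FILE *f;
        long val = -1;

        snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%ld", &val) != 1)
                val = -1;
        fclose(f);
        return val;
}

int main(void)
{
        /* "sda" is only an example device name */
        printf("logical_block_size:  %ld\n", read_queue_limit("sda", "logical_block_size"));
        printf("physical_block_size: %ld\n", read_queue_limit("sda", "physical_block_size"));
        printf("minimum_io_size:     %ld\n", read_queue_limit("sda", "minimum_io_size"));
        printf("optimal_io_size:     %ld\n", read_queue_limit("sda", "optimal_io_size"));
        return 0;
}
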
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
new file mode 100644
index 000000000000..0a92a7c93a62
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
@@ -0,0 +1,33 @@
1Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/model
2Date: March 2009
3Kernel Version: 2.6.30
4Contact: iss_storagedev@hp.com
5Description: Displays the SCSI INQUIRY page 0 model for logical drive
6 Y of controller X.
7
8Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/rev
9Date: March 2009
10Kernel Version: 2.6.30
11Contact: iss_storagedev@hp.com
12Description: Displays the SCSI INQUIRY page 0 revision for logical
13 drive Y of controller X.
14
15Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/unique_id
16Date: March 2009
17Kernel Version: 2.6.30
18Contact: iss_storagedev@hp.com
19Description: Displays the SCSI INQUIRY page 83 serial number for logical
20 drive Y of controller X.
21
22Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/vendor
23Date: March 2009
24Kernel Version: 2.6.30
25Contact: iss_storagedev@hp.com
26Description: Displays the SCSI INQUIRY page 0 vendor for logical drive
27 Y of controller X.
28
29Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/block:cciss!cXdY
30Date: March 2009
31Kernel Version: 2.6.30
32Contact: iss_storagedev@hp.com
33Description: A symbolic link to /sys/block/cciss!cXdY
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6fab97ea7e6b..8d2158a1c6aa 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -186,7 +186,7 @@ a virtual address mapping (unlike the earlier scheme of virtual address
186do not have a corresponding kernel virtual address space mapping) and 186do not have a corresponding kernel virtual address space mapping) and
187low-memory pages. 187low-memory pages.
188 188
189Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion 189Note: Please refer to Documentation/DMA-mapping.txt for a discussion
190on PCI high mem DMA aspects and mapping of scatter gather lists, and support 190on PCI high mem DMA aspects and mapping of scatter gather lists, and support
191for 64 bit PCI. 191for 64 bit PCI.
192 192
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 0abfbaa59871..40424edae939 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -147,24 +147,40 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void *arg)
147 return ret; 147 return ret;
148} 148}
149 149
150struct omap_msg_tx_data {
151 mbox_msg_t msg;
152 void *arg;
153};
154
155static void omap_msg_tx_end_io(struct request *rq, int error)
156{
157 kfree(rq->special);
158 __blk_put_request(rq->q, rq);
159}
160
150int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg) 161int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
151{ 162{
163 struct omap_msg_tx_data *tx_data;
152 struct request *rq; 164 struct request *rq;
153 struct request_queue *q = mbox->txq->queue; 165 struct request_queue *q = mbox->txq->queue;
154 int ret = 0; 166
167 tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
168 if (unlikely(!tx_data))
169 return -ENOMEM;
155 170
156 rq = blk_get_request(q, WRITE, GFP_ATOMIC); 171 rq = blk_get_request(q, WRITE, GFP_ATOMIC);
157 if (unlikely(!rq)) { 172 if (unlikely(!rq)) {
158 ret = -ENOMEM; 173 kfree(tx_data);
159 goto fail; 174 return -ENOMEM;
160 } 175 }
161 176
162 rq->data = (void *)msg; 177 tx_data->msg = msg;
163 blk_insert_request(q, rq, 0, arg); 178 tx_data->arg = arg;
179 rq->end_io = omap_msg_tx_end_io;
180 blk_insert_request(q, rq, 0, tx_data);
164 181
165 schedule_work(&mbox->txq->work); 182 schedule_work(&mbox->txq->work);
166 fail: 183 return 0;
167 return ret;
168} 184}
169EXPORT_SYMBOL(omap_mbox_msg_send); 185EXPORT_SYMBOL(omap_mbox_msg_send);
170 186
@@ -178,22 +194,28 @@ static void mbox_tx_work(struct work_struct *work)
178 struct request_queue *q = mbox->txq->queue; 194 struct request_queue *q = mbox->txq->queue;
179 195
180 while (1) { 196 while (1) {
197 struct omap_msg_tx_data *tx_data;
198
181 spin_lock(q->queue_lock); 199 spin_lock(q->queue_lock);
182 rq = elv_next_request(q); 200 rq = blk_fetch_request(q);
183 spin_unlock(q->queue_lock); 201 spin_unlock(q->queue_lock);
184 202
185 if (!rq) 203 if (!rq)
186 break; 204 break;
187 205
188 ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special); 206 tx_data = rq->special;
207
208 ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
189 if (ret) { 209 if (ret) {
190 enable_mbox_irq(mbox, IRQ_TX); 210 enable_mbox_irq(mbox, IRQ_TX);
211 spin_lock(q->queue_lock);
212 blk_requeue_request(q, rq);
213 spin_unlock(q->queue_lock);
191 return; 214 return;
192 } 215 }
193 216
194 spin_lock(q->queue_lock); 217 spin_lock(q->queue_lock);
195 if (__blk_end_request(rq, 0, 0)) 218 __blk_end_request_all(rq, 0);
196 BUG();
197 spin_unlock(q->queue_lock); 219 spin_unlock(q->queue_lock);
198 } 220 }
199} 221}
@@ -218,16 +240,13 @@ static void mbox_rx_work(struct work_struct *work)
218 240
219 while (1) { 241 while (1) {
220 spin_lock_irqsave(q->queue_lock, flags); 242 spin_lock_irqsave(q->queue_lock, flags);
221 rq = elv_next_request(q); 243 rq = blk_fetch_request(q);
222 spin_unlock_irqrestore(q->queue_lock, flags); 244 spin_unlock_irqrestore(q->queue_lock, flags);
223 if (!rq) 245 if (!rq)
224 break; 246 break;
225 247
226 msg = (mbox_msg_t) rq->data; 248 msg = (mbox_msg_t)rq->special;
227 249 blk_end_request_all(rq, 0);
228 if (blk_end_request(rq, 0, 0))
229 BUG();
230
231 mbox->rxq->callback((void *)msg); 250 mbox->rxq->callback((void *)msg);
232 } 251 }
233} 252}
@@ -264,7 +283,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
264 goto nomem; 283 goto nomem;
265 284
266 msg = mbox_fifo_read(mbox); 285 msg = mbox_fifo_read(mbox);
267 rq->data = (void *)msg;
268 286
269 if (unlikely(mbox_seq_test(mbox, msg))) { 287 if (unlikely(mbox_seq_test(mbox, msg))) {
270 pr_info("mbox: Illegal seq bit!(%08x)\n", msg); 288 pr_info("mbox: Illegal seq bit!(%08x)\n", msg);
@@ -272,7 +290,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
272 mbox->err_notify(); 290 mbox->err_notify();
273 } 291 }
274 292
275 blk_insert_request(q, rq, 0, NULL); 293 blk_insert_request(q, rq, 0, (void *)msg);
276 if (mbox->ops->type == OMAP_MBOX_TYPE1) 294 if (mbox->ops->type == OMAP_MBOX_TYPE1)
277 break; 295 break;
278 } 296 }
@@ -329,16 +347,15 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
329 347
330 while (1) { 348 while (1) {
331 spin_lock_irqsave(q->queue_lock, flags); 349 spin_lock_irqsave(q->queue_lock, flags);
332 rq = elv_next_request(q); 350 rq = blk_fetch_request(q);
333 spin_unlock_irqrestore(q->queue_lock, flags); 351 spin_unlock_irqrestore(q->queue_lock, flags);
334 352
335 if (!rq) 353 if (!rq)
336 break; 354 break;
337 355
338 *p = (mbox_msg_t) rq->data; 356 *p = (mbox_msg_t)rq->special;
339 357
340 if (blk_end_request(rq, 0, 0)) 358 blk_end_request_all(rq, 0);
341 BUG();
342 359
343 if (unlikely(mbox_seq_test(mbox, *p))) { 360 if (unlikely(mbox_seq_test(mbox, *p))) {
344 pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p); 361 pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
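
The mailbox conversion above shows the driver-side pattern repeated throughout this series: elv_next_request() plus an explicit dequeue and a byte-counted __blk_end_request() are replaced by blk_fetch_request() and __blk_end_request_all(). A minimal sketch of the resulting request_fn loop, assuming a hypothetical my_handle() that services one request with the queue lock held:

static void my_request_fn(struct request_queue *q)
{
        struct request *rq;

        /* blk_fetch_request() = blk_peek_request() + blk_start_request():
         * the request is dequeued and its timeout timer is started. */
        while ((rq = blk_fetch_request(q)) != NULL) {
                int error = my_handle(q->queuedata, rq);

                /* Complete the whole request at once instead of the old
                 * "if (__blk_end_request(rq, 0, nr_bytes)) BUG();" idiom. */
                __blk_end_request_all(rq, error);
        }
}
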
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 9e105cbc5e5f..a4779912a5ca 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -250,7 +250,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
250 250
251 set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT); 251 set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
252 blk_queue_make_request(bank->disk->queue, axon_ram_make_request); 252 blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
253 blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); 253 blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
254 add_disk(bank->disk); 254 add_disk(bank->disk);
255 255
256 bank->irq_id = irq_of_parse_and_map(device->node, 0); 256 bank->irq_id = irq_of_parse_and_map(device->node, 0);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f934225fd8ef..aa9e926e13d7 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
451 451
452/* Only changed by ubd_init, which is an initcall. */ 452/* Only changed by ubd_init, which is an initcall. */
453static int thread_fd = -1; 453static int thread_fd = -1;
454
455static void ubd_end_request(struct request *req, int bytes, int error)
456{
457 blk_end_request(req, error, bytes);
458}
459
460/* Callable only from interrupt context - otherwise you need to do
461 * spin_lock_irq()/spin_lock_irqsave() */
462static inline void ubd_finish(struct request *req, int bytes)
463{
464 if(bytes < 0){
465 ubd_end_request(req, 0, -EIO);
466 return;
467 }
468 ubd_end_request(req, bytes, 0);
469}
470
471static LIST_HEAD(restart); 454static LIST_HEAD(restart);
472 455
473/* XXX - move this inside ubd_intr. */ 456/* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
475static void ubd_handler(void) 458static void ubd_handler(void)
476{ 459{
477 struct io_thread_req *req; 460 struct io_thread_req *req;
478 struct request *rq;
479 struct ubd *ubd; 461 struct ubd *ubd;
480 struct list_head *list, *next_ele; 462 struct list_head *list, *next_ele;
481 unsigned long flags; 463 unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
492 return; 474 return;
493 } 475 }
494 476
495 rq = req->req; 477 blk_end_request(req->req, 0, req->length);
496 rq->nr_sectors -= req->length >> 9;
497 if(rq->nr_sectors == 0)
498 ubd_finish(rq, rq->hard_nr_sectors << 9);
499 kfree(req); 478 kfree(req);
500 } 479 }
501 reactivate_fd(thread_fd, UBD_IRQ); 480 reactivate_fd(thread_fd, UBD_IRQ);
@@ -1243,27 +1222,26 @@ static void do_ubd_request(struct request_queue *q)
1243{ 1222{
1244 struct io_thread_req *io_req; 1223 struct io_thread_req *io_req;
1245 struct request *req; 1224 struct request *req;
1246 int n, last_sectors; 1225 sector_t sector;
1226 int n;
1247 1227
1248 while(1){ 1228 while(1){
1249 struct ubd *dev = q->queuedata; 1229 struct ubd *dev = q->queuedata;
1250 if(dev->end_sg == 0){ 1230 if(dev->end_sg == 0){
1251 struct request *req = elv_next_request(q); 1231 struct request *req = blk_fetch_request(q);
1252 if(req == NULL) 1232 if(req == NULL)
1253 return; 1233 return;
1254 1234
1255 dev->request = req; 1235 dev->request = req;
1256 blkdev_dequeue_request(req);
1257 dev->start_sg = 0; 1236 dev->start_sg = 0;
1258 dev->end_sg = blk_rq_map_sg(q, req, dev->sg); 1237 dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
1259 } 1238 }
1260 1239
1261 req = dev->request; 1240 req = dev->request;
1262 last_sectors = 0; 1241 sector = blk_rq_pos(req);
1263 while(dev->start_sg < dev->end_sg){ 1242 while(dev->start_sg < dev->end_sg){
1264 struct scatterlist *sg = &dev->sg[dev->start_sg]; 1243 struct scatterlist *sg = &dev->sg[dev->start_sg];
1265 1244
1266 req->sector += last_sectors;
1267 io_req = kmalloc(sizeof(struct io_thread_req), 1245 io_req = kmalloc(sizeof(struct io_thread_req),
1268 GFP_ATOMIC); 1246 GFP_ATOMIC);
1269 if(io_req == NULL){ 1247 if(io_req == NULL){
@@ -1272,10 +1250,10 @@ static void do_ubd_request(struct request_queue *q)
1272 return; 1250 return;
1273 } 1251 }
1274 prepare_request(req, io_req, 1252 prepare_request(req, io_req,
1275 (unsigned long long) req->sector << 9, 1253 (unsigned long long)sector << 9,
1276 sg->offset, sg->length, sg_page(sg)); 1254 sg->offset, sg->length, sg_page(sg));
1277 1255
1278 last_sectors = sg->length >> 9; 1256 sector += sg->length >> 9;
1279 n = os_write_file(thread_fd, &io_req, 1257 n = os_write_file(thread_fd, &io_req,
1280 sizeof(struct io_thread_req *)); 1258 sizeof(struct io_thread_req *));
1281 if(n != sizeof(struct io_thread_req *)){ 1259 if(n != sizeof(struct io_thread_req *)){
diff --git a/block/Kconfig b/block/Kconfig
index e7d12782bcfb..2c39527aa7db 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,6 +26,7 @@ if BLOCK
26config LBD 26config LBD
27 bool "Support for large block devices and files" 27 bool "Support for large block devices and files"
28 depends on !64BIT 28 depends on !64BIT
29 default y
29 help 30 help
30 Enable block devices or files of size 2TB and larger. 31 Enable block devices or files of size 2TB and larger.
31 32
@@ -38,11 +39,13 @@ config LBD
38 39
39 The ext4 filesystem requires that this feature be enabled in 40 The ext4 filesystem requires that this feature be enabled in
40 order to support filesystems that have the huge_file feature 41 order to support filesystems that have the huge_file feature
41 enabled. Otherwise, it will refuse to mount any filesystems 42 enabled. Otherwise, it will refuse to mount in the read-write
42 that use the huge_file feature, which is enabled by default 43 mode any filesystems that use the huge_file feature, which is
43 by mke2fs.ext4. The GFS2 filesystem also requires this feature. 44 enabled by default by mke2fs.ext4.
44 45
45 If unsure, say N. 46 The GFS2 filesystem also requires this feature.
47
48 If unsure, say Y.
46 49
47config BLK_DEV_BSG 50config BLK_DEV_BSG
48 bool "Block layer SG support v4 (EXPERIMENTAL)" 51 bool "Block layer SG support v4 (EXPERIMENTAL)"
diff --git a/block/as-iosched.c b/block/as-iosched.c
index c48fa670d221..7a12cf6ee1d3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
306 data_dir = rq_is_sync(rq1); 306 data_dir = rq_is_sync(rq1);
307 307
308 last = ad->last_sector[data_dir]; 308 last = ad->last_sector[data_dir];
309 s1 = rq1->sector; 309 s1 = blk_rq_pos(rq1);
310 s2 = rq2->sector; 310 s2 = blk_rq_pos(rq2);
311 311
312 BUG_ON(data_dir != rq_is_sync(rq2)); 312 BUG_ON(data_dir != rq_is_sync(rq2));
313 313
@@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
566 as_update_thinktime(ad, aic, thinktime); 566 as_update_thinktime(ad, aic, thinktime);
567 567
568 /* Calculate read -> read seek distance */ 568 /* Calculate read -> read seek distance */
569 if (aic->last_request_pos < rq->sector) 569 if (aic->last_request_pos < blk_rq_pos(rq))
570 seek_dist = rq->sector - aic->last_request_pos; 570 seek_dist = blk_rq_pos(rq) -
571 aic->last_request_pos;
571 else 572 else
572 seek_dist = aic->last_request_pos - rq->sector; 573 seek_dist = aic->last_request_pos -
574 blk_rq_pos(rq);
573 as_update_seekdist(ad, aic, seek_dist); 575 as_update_seekdist(ad, aic, seek_dist);
574 } 576 }
575 aic->last_request_pos = rq->sector + rq->nr_sectors; 577 aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
576 set_bit(AS_TASK_IOSTARTED, &aic->state); 578 set_bit(AS_TASK_IOSTARTED, &aic->state);
577 spin_unlock(&aic->lock); 579 spin_unlock(&aic->lock);
578 } 580 }
@@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
587{ 589{
588 unsigned long delay; /* jiffies */ 590 unsigned long delay; /* jiffies */
589 sector_t last = ad->last_sector[ad->batch_data_dir]; 591 sector_t last = ad->last_sector[ad->batch_data_dir];
590 sector_t next = rq->sector; 592 sector_t next = blk_rq_pos(rq);
591 sector_t delta; /* acceptable close offset (in sectors) */ 593 sector_t delta; /* acceptable close offset (in sectors) */
592 sector_t s; 594 sector_t s;
593 595
@@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
981 * This has to be set in order to be correctly updated by 983 * This has to be set in order to be correctly updated by
982 * as_find_next_rq 984 * as_find_next_rq
983 */ 985 */
984 ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; 986 ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
985 987
986 if (data_dir == BLK_RW_SYNC) { 988 if (data_dir == BLK_RW_SYNC) {
987 struct io_context *ioc = RQ_IOC(rq); 989 struct io_context *ioc = RQ_IOC(rq);
@@ -1312,12 +1314,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
1312static void as_work_handler(struct work_struct *work) 1314static void as_work_handler(struct work_struct *work)
1313{ 1315{
1314 struct as_data *ad = container_of(work, struct as_data, antic_work); 1316 struct as_data *ad = container_of(work, struct as_data, antic_work);
1315 struct request_queue *q = ad->q;
1316 unsigned long flags;
1317 1317
1318 spin_lock_irqsave(q->queue_lock, flags); 1318 blk_run_queue(ad->q);
1319 blk_start_queueing(q);
1320 spin_unlock_irqrestore(q->queue_lock, flags);
1321} 1319}
1322 1320
1323static int as_may_queue(struct request_queue *q, int rw) 1321static int as_may_queue(struct request_queue *q, int rw)
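
The as-iosched hunks illustrate the accessor conversion applied across the whole patch: direct reads of struct request fields give way to helper functions, so the core can make the underlying members (__sector, __data_len) private. A small sketch summarizing the mapping, with print_rq() purely illustrative:

static void print_rq(struct request *rq)
{
        pr_info("rq %p: sector %llu, %u sectors (%u in current segment), %u bytes\n",
                rq,
                (unsigned long long)blk_rq_pos(rq),     /* was rq->sector */
                blk_rq_sectors(rq),                     /* was rq->nr_sectors */
                blk_rq_cur_sectors(rq),                 /* was rq->current_nr_sectors */
                blk_rq_bytes(rq));                      /* was rq->data_len */
}
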
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 20b4111fa050..30022b4e2f63 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
106 */ 106 */
107 q->ordseq = 0; 107 q->ordseq = 0;
108 rq = q->orig_bar_rq; 108 rq = q->orig_bar_rq;
109 109 __blk_end_request_all(rq, q->orderr);
110 if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
111 BUG();
112
113 return true; 110 return true;
114} 111}
115 112
@@ -166,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
166 * For an empty barrier, there's no actual BAR request, which 163 * For an empty barrier, there's no actual BAR request, which
167 * in turn makes POSTFLUSH unnecessary. Mask them off. 164 * in turn makes POSTFLUSH unnecessary. Mask them off.
168 */ 165 */
169 if (!rq->hard_nr_sectors) { 166 if (!blk_rq_sectors(rq)) {
170 q->ordered &= ~(QUEUE_ORDERED_DO_BAR | 167 q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
171 QUEUE_ORDERED_DO_POSTFLUSH); 168 QUEUE_ORDERED_DO_POSTFLUSH);
172 /* 169 /*
@@ -183,7 +180,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
183 } 180 }
184 181
185 /* stash away the original request */ 182 /* stash away the original request */
186 elv_dequeue_request(q, rq); 183 blk_dequeue_request(rq);
187 q->orig_bar_rq = rq; 184 q->orig_bar_rq = rq;
188 rq = NULL; 185 rq = NULL;
189 186
@@ -221,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
221 } else 218 } else
222 skip |= QUEUE_ORDSEQ_PREFLUSH; 219 skip |= QUEUE_ORDSEQ_PREFLUSH;
223 220
224 if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight) 221 if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
225 rq = NULL; 222 rq = NULL;
226 else 223 else
227 skip |= QUEUE_ORDSEQ_DRAIN; 224 skip |= QUEUE_ORDSEQ_DRAIN;
@@ -251,10 +248,8 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
251 * Queue ordering not supported. Terminate 248 * Queue ordering not supported. Terminate
252 * with prejudice. 249 * with prejudice.
253 */ 250 */
254 elv_dequeue_request(q, rq); 251 blk_dequeue_request(rq);
255 if (__blk_end_request(rq, -EOPNOTSUPP, 252 __blk_end_request_all(rq, -EOPNOTSUPP);
256 blk_rq_bytes(rq)))
257 BUG();
258 *rqp = NULL; 253 *rqp = NULL;
259 return false; 254 return false;
260 } 255 }
@@ -329,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
329 /* 324 /*
330 * The driver must store the error location in ->bi_sector, if 325 * The driver must store the error location in ->bi_sector, if
331 * it supports it. For non-stacked drivers, this should be copied 326 * it supports it. For non-stacked drivers, this should be copied
332 * from rq->sector. 327 * from blk_rq_pos(rq).
333 */ 328 */
334 if (error_sector) 329 if (error_sector)
335 *error_sector = bio->bi_sector; 330 *error_sector = bio->bi_sector;
@@ -393,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
393 388
394 bio->bi_sector = sector; 389 bio->bi_sector = sector;
395 390
396 if (nr_sects > q->max_hw_sectors) { 391 if (nr_sects > queue_max_hw_sectors(q)) {
397 bio->bi_size = q->max_hw_sectors << 9; 392 bio->bi_size = queue_max_hw_sectors(q) << 9;
398 nr_sects -= q->max_hw_sectors; 393 nr_sects -= queue_max_hw_sectors(q);
399 sector += q->max_hw_sectors; 394 sector += queue_max_hw_sectors(q);
400 } else { 395 } else {
401 bio->bi_size = nr_sects << 9; 396 bio->bi_size = nr_sects << 9;
402 nr_sects = 0; 397 nr_sects = 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index 648f15cb41f1..d17d71c71d4f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -60,11 +60,11 @@ static void drive_stat_acct(struct request *rq, int new_io)
60 int rw = rq_data_dir(rq); 60 int rw = rq_data_dir(rq);
61 int cpu; 61 int cpu;
62 62
63 if (!blk_fs_request(rq) || !blk_do_io_stat(rq)) 63 if (!blk_do_io_stat(rq))
64 return; 64 return;
65 65
66 cpu = part_stat_lock(); 66 cpu = part_stat_lock();
67 part = disk_map_sector_rcu(rq->rq_disk, rq->sector); 67 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
68 68
69 if (!new_io) 69 if (!new_io)
70 part_stat_inc(cpu, part, merges[rw]); 70 part_stat_inc(cpu, part, merges[rw]);
@@ -119,13 +119,14 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
119 INIT_LIST_HEAD(&rq->timeout_list); 119 INIT_LIST_HEAD(&rq->timeout_list);
120 rq->cpu = -1; 120 rq->cpu = -1;
121 rq->q = q; 121 rq->q = q;
122 rq->sector = rq->hard_sector = (sector_t) -1; 122 rq->__sector = (sector_t) -1;
123 INIT_HLIST_NODE(&rq->hash); 123 INIT_HLIST_NODE(&rq->hash);
124 RB_CLEAR_NODE(&rq->rb_node); 124 RB_CLEAR_NODE(&rq->rb_node);
125 rq->cmd = rq->__cmd; 125 rq->cmd = rq->__cmd;
126 rq->cmd_len = BLK_MAX_CDB; 126 rq->cmd_len = BLK_MAX_CDB;
127 rq->tag = -1; 127 rq->tag = -1;
128 rq->ref_count = 1; 128 rq->ref_count = 1;
129 rq->start_time = jiffies;
129} 130}
130EXPORT_SYMBOL(blk_rq_init); 131EXPORT_SYMBOL(blk_rq_init);
131 132
@@ -176,14 +177,11 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
176 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 177 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
177 rq->cmd_flags); 178 rq->cmd_flags);
178 179
179 printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", 180 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
180 (unsigned long long)rq->sector, 181 (unsigned long long)blk_rq_pos(rq),
181 rq->nr_sectors, 182 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
182 rq->current_nr_sectors); 183 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
183 printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", 184 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
184 rq->bio, rq->biotail,
185 rq->buffer, rq->data,
186 rq->data_len);
187 185
188 if (blk_pc_request(rq)) { 186 if (blk_pc_request(rq)) {
189 printk(KERN_INFO " cdb: "); 187 printk(KERN_INFO " cdb: ");
@@ -325,24 +323,6 @@ void blk_unplug(struct request_queue *q)
325} 323}
326EXPORT_SYMBOL(blk_unplug); 324EXPORT_SYMBOL(blk_unplug);
327 325
328static void blk_invoke_request_fn(struct request_queue *q)
329{
330 if (unlikely(blk_queue_stopped(q)))
331 return;
332
333 /*
334 * one level of recursion is ok and is much faster than kicking
335 * the unplug handling
336 */
337 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
338 q->request_fn(q);
339 queue_flag_clear(QUEUE_FLAG_REENTER, q);
340 } else {
341 queue_flag_set(QUEUE_FLAG_PLUGGED, q);
342 kblockd_schedule_work(q, &q->unplug_work);
343 }
344}
345
346/** 326/**
347 * blk_start_queue - restart a previously stopped queue 327 * blk_start_queue - restart a previously stopped queue
348 * @q: The &struct request_queue in question 328 * @q: The &struct request_queue in question
@@ -357,7 +337,7 @@ void blk_start_queue(struct request_queue *q)
357 WARN_ON(!irqs_disabled()); 337 WARN_ON(!irqs_disabled());
358 338
359 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 339 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
360 blk_invoke_request_fn(q); 340 __blk_run_queue(q);
361} 341}
362EXPORT_SYMBOL(blk_start_queue); 342EXPORT_SYMBOL(blk_start_queue);
363 343
@@ -417,12 +397,23 @@ void __blk_run_queue(struct request_queue *q)
417{ 397{
418 blk_remove_plug(q); 398 blk_remove_plug(q);
419 399
400 if (unlikely(blk_queue_stopped(q)))
401 return;
402
403 if (elv_queue_empty(q))
404 return;
405
420 /* 406 /*
421 * Only recurse once to avoid overrunning the stack, let the unplug 407 * Only recurse once to avoid overrunning the stack, let the unplug
422 * handling reinvoke the handler shortly if we already got there. 408 * handling reinvoke the handler shortly if we already got there.
423 */ 409 */
424 if (!elv_queue_empty(q)) 410 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
425 blk_invoke_request_fn(q); 411 q->request_fn(q);
412 queue_flag_clear(QUEUE_FLAG_REENTER, q);
413 } else {
414 queue_flag_set(QUEUE_FLAG_PLUGGED, q);
415 kblockd_schedule_work(q, &q->unplug_work);
416 }
426} 417}
427EXPORT_SYMBOL(__blk_run_queue); 418EXPORT_SYMBOL(__blk_run_queue);
428 419
@@ -432,9 +423,7 @@ EXPORT_SYMBOL(__blk_run_queue);
432 * 423 *
433 * Description: 424 * Description:
434 * Invoke request handling on this queue, if it has pending work to do. 425 * Invoke request handling on this queue, if it has pending work to do.
435 * May be used to restart queueing when a request has completed. Also 426 * May be used to restart queueing when a request has completed.
436 * See @blk_start_queueing.
437 *
438 */ 427 */
439void blk_run_queue(struct request_queue *q) 428void blk_run_queue(struct request_queue *q)
440{ 429{
@@ -894,26 +883,58 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
894EXPORT_SYMBOL(blk_get_request); 883EXPORT_SYMBOL(blk_get_request);
895 884
896/** 885/**
897 * blk_start_queueing - initiate dispatch of requests to device 886 * blk_make_request - given a bio, allocate a corresponding struct request.
898 * @q: request queue to kick into gear 887 *
888 * @bio: The bio describing the memory mappings that will be submitted for IO.
889 * It may be a chained-bio properly constructed by block/bio layer.
899 * 890 *
900 * This is basically a helper to remove the need to know whether a queue 891 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
901 * is plugged or not if someone just wants to initiate dispatch of requests 892 * type commands. Where the struct request needs to be farther initialized by
902 * for this queue. Should be used to start queueing on a device outside 893 * the caller. It is passed a &struct bio, which describes the memory info of
903 * of ->request_fn() context. Also see @blk_run_queue. 894 * the I/O transfer.
904 * 895 *
905 * The queue lock must be held with interrupts disabled. 896 * The caller of blk_make_request must make sure that bi_io_vec
897 * are set to describe the memory buffers. That bio_data_dir() will return
898 * the needed direction of the request. (And all bio's in the passed bio-chain
899 * are properly set accordingly)
900 *
 901 * If called under non-sleepable conditions, mapped bio buffers must not
902 * need bouncing, by calling the appropriate masked or flagged allocator,
903 * suitable for the target device. Otherwise the call to blk_queue_bounce will
904 * BUG.
905 *
906 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
907 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
908 * anything but the first bio in the chain. Otherwise you risk waiting for IO
909 * completion of a bio that hasn't been submitted yet, thus resulting in a
910 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
911 * of bio_alloc(), as that avoids the mempool deadlock.
912 * If possible a big IO should be split into smaller parts when allocation
913 * fails. Partial allocation should not be an error, or you risk a live-lock.
906 */ 914 */
907void blk_start_queueing(struct request_queue *q) 915struct request *blk_make_request(struct request_queue *q, struct bio *bio,
916 gfp_t gfp_mask)
908{ 917{
909 if (!blk_queue_plugged(q)) { 918 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
910 if (unlikely(blk_queue_stopped(q))) 919
911 return; 920 if (unlikely(!rq))
912 q->request_fn(q); 921 return ERR_PTR(-ENOMEM);
913 } else 922
914 __generic_unplug_device(q); 923 for_each_bio(bio) {
924 struct bio *bounce_bio = bio;
925 int ret;
926
927 blk_queue_bounce(q, &bounce_bio);
928 ret = blk_rq_append_bio(q, rq, bounce_bio);
929 if (unlikely(ret)) {
930 blk_put_request(rq);
931 return ERR_PTR(ret);
932 }
933 }
934
935 return rq;
915} 936}
916EXPORT_SYMBOL(blk_start_queueing); 937EXPORT_SYMBOL(blk_make_request);
917 938
918/** 939/**
919 * blk_requeue_request - put a request back on queue 940 * blk_requeue_request - put a request back on queue
@@ -934,6 +955,8 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
934 if (blk_rq_tagged(rq)) 955 if (blk_rq_tagged(rq))
935 blk_queue_end_tag(q, rq); 956 blk_queue_end_tag(q, rq);
936 957
958 BUG_ON(blk_queued_rq(rq));
959
937 elv_requeue_request(q, rq); 960 elv_requeue_request(q, rq);
938} 961}
939EXPORT_SYMBOL(blk_requeue_request); 962EXPORT_SYMBOL(blk_requeue_request);
@@ -969,7 +992,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
969 * barrier 992 * barrier
970 */ 993 */
971 rq->cmd_type = REQ_TYPE_SPECIAL; 994 rq->cmd_type = REQ_TYPE_SPECIAL;
972 rq->cmd_flags |= REQ_SOFTBARRIER;
973 995
974 rq->special = data; 996 rq->special = data;
975 997
@@ -983,7 +1005,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
983 1005
984 drive_stat_acct(rq, 1); 1006 drive_stat_acct(rq, 1);
985 __elv_add_request(q, rq, where, 0); 1007 __elv_add_request(q, rq, where, 0);
986 blk_start_queueing(q); 1008 __blk_run_queue(q);
987 spin_unlock_irqrestore(q->queue_lock, flags); 1009 spin_unlock_irqrestore(q->queue_lock, flags);
988} 1010}
989EXPORT_SYMBOL(blk_insert_request); 1011EXPORT_SYMBOL(blk_insert_request);
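
Following the kernel-doc above, a rough sketch of how a caller might use the new blk_make_request() for a BLOCK_PC-style command. The bio chain is assumed to have been built by the caller (e.g. with bio_kmalloc() and bio_add_pc_page()); my_submit_pc_bio() and its CDB handling are illustrative only:

static int my_submit_pc_bio(struct request_queue *q, struct bio *bio,
                            unsigned char *cdb, unsigned int cdb_len)
{
        struct request *rq;
        int error;

        rq = blk_make_request(q, bio, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        memcpy(rq->cmd, cdb, cdb_len);
        rq->cmd_len = cdb_len;

        /* Synchronous execution; last argument: not at the head of the queue. */
        error = blk_execute_rq(q, NULL, rq, 0);

        blk_put_request(rq);
        return error;
}
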
@@ -1105,16 +1127,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1105 if (bio_failfast_driver(bio)) 1127 if (bio_failfast_driver(bio))
1106 req->cmd_flags |= REQ_FAILFAST_DRIVER; 1128 req->cmd_flags |= REQ_FAILFAST_DRIVER;
1107 1129
1108 /*
1109 * REQ_BARRIER implies no merging, but lets make it explicit
1110 */
1111 if (unlikely(bio_discard(bio))) { 1130 if (unlikely(bio_discard(bio))) {
1112 req->cmd_flags |= REQ_DISCARD; 1131 req->cmd_flags |= REQ_DISCARD;
1113 if (bio_barrier(bio)) 1132 if (bio_barrier(bio))
1114 req->cmd_flags |= REQ_SOFTBARRIER; 1133 req->cmd_flags |= REQ_SOFTBARRIER;
1115 req->q->prepare_discard_fn(req->q, req); 1134 req->q->prepare_discard_fn(req->q, req);
1116 } else if (unlikely(bio_barrier(bio))) 1135 } else if (unlikely(bio_barrier(bio)))
1117 req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); 1136 req->cmd_flags |= REQ_HARDBARRIER;
1118 1137
1119 if (bio_sync(bio)) 1138 if (bio_sync(bio))
1120 req->cmd_flags |= REQ_RW_SYNC; 1139 req->cmd_flags |= REQ_RW_SYNC;
@@ -1124,9 +1143,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1124 req->cmd_flags |= REQ_NOIDLE; 1143 req->cmd_flags |= REQ_NOIDLE;
1125 1144
1126 req->errors = 0; 1145 req->errors = 0;
1127 req->hard_sector = req->sector = bio->bi_sector; 1146 req->__sector = bio->bi_sector;
1128 req->ioprio = bio_prio(bio); 1147 req->ioprio = bio_prio(bio);
1129 req->start_time = jiffies;
1130 blk_rq_bio_prep(req->q, req, bio); 1148 blk_rq_bio_prep(req->q, req, bio);
1131} 1149}
1132 1150
@@ -1142,14 +1160,13 @@ static inline bool queue_should_plug(struct request_queue *q)
1142static int __make_request(struct request_queue *q, struct bio *bio) 1160static int __make_request(struct request_queue *q, struct bio *bio)
1143{ 1161{
1144 struct request *req; 1162 struct request *req;
1145 int el_ret, nr_sectors; 1163 int el_ret;
1164 unsigned int bytes = bio->bi_size;
1146 const unsigned short prio = bio_prio(bio); 1165 const unsigned short prio = bio_prio(bio);
1147 const int sync = bio_sync(bio); 1166 const int sync = bio_sync(bio);
1148 const int unplug = bio_unplug(bio); 1167 const int unplug = bio_unplug(bio);
1149 int rw_flags; 1168 int rw_flags;
1150 1169
1151 nr_sectors = bio_sectors(bio);
1152
1153 /* 1170 /*
1154 * low level driver can indicate that it wants pages above a 1171 * low level driver can indicate that it wants pages above a
1155 * certain limit bounced to low memory (ie for highmem, or even 1172 * certain limit bounced to low memory (ie for highmem, or even
@@ -1174,7 +1191,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1174 1191
1175 req->biotail->bi_next = bio; 1192 req->biotail->bi_next = bio;
1176 req->biotail = bio; 1193 req->biotail = bio;
1177 req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1194 req->__data_len += bytes;
1178 req->ioprio = ioprio_best(req->ioprio, prio); 1195 req->ioprio = ioprio_best(req->ioprio, prio);
1179 if (!blk_rq_cpu_valid(req)) 1196 if (!blk_rq_cpu_valid(req))
1180 req->cpu = bio->bi_comp_cpu; 1197 req->cpu = bio->bi_comp_cpu;
@@ -1200,10 +1217,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1200 * not touch req->buffer either... 1217 * not touch req->buffer either...
1201 */ 1218 */
1202 req->buffer = bio_data(bio); 1219 req->buffer = bio_data(bio);
1203 req->current_nr_sectors = bio_cur_sectors(bio); 1220 req->__sector = bio->bi_sector;
1204 req->hard_cur_sectors = req->current_nr_sectors; 1221 req->__data_len += bytes;
1205 req->sector = req->hard_sector = bio->bi_sector;
1206 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1207 req->ioprio = ioprio_best(req->ioprio, prio); 1222 req->ioprio = ioprio_best(req->ioprio, prio);
1208 if (!blk_rq_cpu_valid(req)) 1223 if (!blk_rq_cpu_valid(req))
1209 req->cpu = bio->bi_comp_cpu; 1224 req->cpu = bio->bi_comp_cpu;
@@ -1414,11 +1429,11 @@ static inline void __generic_make_request(struct bio *bio)
1414 goto end_io; 1429 goto end_io;
1415 } 1430 }
1416 1431
1417 if (unlikely(nr_sectors > q->max_hw_sectors)) { 1432 if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
1418 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1433 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1419 bdevname(bio->bi_bdev, b), 1434 bdevname(bio->bi_bdev, b),
1420 bio_sectors(bio), 1435 bio_sectors(bio),
1421 q->max_hw_sectors); 1436 queue_max_hw_sectors(q));
1422 goto end_io; 1437 goto end_io;
1423 } 1438 }
1424 1439
@@ -1584,8 +1599,8 @@ EXPORT_SYMBOL(submit_bio);
1584 */ 1599 */
1585int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1600int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1586{ 1601{
1587 if (rq->nr_sectors > q->max_sectors || 1602 if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
1588 rq->data_len > q->max_hw_sectors << 9) { 1603 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
1589 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1604 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1590 return -EIO; 1605 return -EIO;
1591 } 1606 }
@@ -1597,8 +1612,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1597 * limitation. 1612 * limitation.
1598 */ 1613 */
1599 blk_recalc_rq_segments(rq); 1614 blk_recalc_rq_segments(rq);
1600 if (rq->nr_phys_segments > q->max_phys_segments || 1615 if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
1601 rq->nr_phys_segments > q->max_hw_segments) { 1616 rq->nr_phys_segments > queue_max_hw_segments(q)) {
1602 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 1617 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1603 return -EIO; 1618 return -EIO;
1604 } 1619 }
@@ -1642,40 +1657,15 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1642} 1657}
1643EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 1658EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1644 1659
1645/**
1646 * blkdev_dequeue_request - dequeue request and start timeout timer
1647 * @req: request to dequeue
1648 *
1649 * Dequeue @req and start timeout timer on it. This hands off the
1650 * request to the driver.
1651 *
1652 * Block internal functions which don't want to start timer should
1653 * call elv_dequeue_request().
1654 */
1655void blkdev_dequeue_request(struct request *req)
1656{
1657 elv_dequeue_request(req->q, req);
1658
1659 /*
1660 * We are now handing the request to the hardware, add the
1661 * timeout handler.
1662 */
1663 blk_add_timer(req);
1664}
1665EXPORT_SYMBOL(blkdev_dequeue_request);
1666
1667static void blk_account_io_completion(struct request *req, unsigned int bytes) 1660static void blk_account_io_completion(struct request *req, unsigned int bytes)
1668{ 1661{
1669 if (!blk_do_io_stat(req)) 1662 if (blk_do_io_stat(req)) {
1670 return;
1671
1672 if (blk_fs_request(req)) {
1673 const int rw = rq_data_dir(req); 1663 const int rw = rq_data_dir(req);
1674 struct hd_struct *part; 1664 struct hd_struct *part;
1675 int cpu; 1665 int cpu;
1676 1666
1677 cpu = part_stat_lock(); 1667 cpu = part_stat_lock();
1678 part = disk_map_sector_rcu(req->rq_disk, req->sector); 1668 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
1679 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1669 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1680 part_stat_unlock(); 1670 part_stat_unlock();
1681 } 1671 }
@@ -1683,22 +1673,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
1683 1673
1684static void blk_account_io_done(struct request *req) 1674static void blk_account_io_done(struct request *req)
1685{ 1675{
1686 if (!blk_do_io_stat(req))
1687 return;
1688
1689 /* 1676 /*
1690 * Account IO completion. bar_rq isn't accounted as a normal 1677 * Account IO completion. bar_rq isn't accounted as a normal
1691 * IO on queueing nor completion. Accounting the containing 1678 * IO on queueing nor completion. Accounting the containing
1692 * request is enough. 1679 * request is enough.
1693 */ 1680 */
1694 if (blk_fs_request(req) && req != &req->q->bar_rq) { 1681 if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
1695 unsigned long duration = jiffies - req->start_time; 1682 unsigned long duration = jiffies - req->start_time;
1696 const int rw = rq_data_dir(req); 1683 const int rw = rq_data_dir(req);
1697 struct hd_struct *part; 1684 struct hd_struct *part;
1698 int cpu; 1685 int cpu;
1699 1686
1700 cpu = part_stat_lock(); 1687 cpu = part_stat_lock();
1701 part = disk_map_sector_rcu(req->rq_disk, req->sector); 1688 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
1702 1689
1703 part_stat_inc(cpu, part, ios[rw]); 1690 part_stat_inc(cpu, part, ios[rw]);
1704 part_stat_add(cpu, part, ticks[rw], duration); 1691 part_stat_add(cpu, part, ticks[rw], duration);
@@ -1710,25 +1697,209 @@ static void blk_account_io_done(struct request *req)
1710} 1697}
1711 1698
1712/** 1699/**
1713 * __end_that_request_first - end I/O on a request 1700 * blk_peek_request - peek at the top of a request queue
1714 * @req: the request being processed 1701 * @q: request queue to peek at
1702 *
1703 * Description:
1704 * Return the request at the top of @q. The returned request
1705 * should be started using blk_start_request() before LLD starts
1706 * processing it.
1707 *
1708 * Return:
1709 * Pointer to the request at the top of @q if available. Null
1710 * otherwise.
1711 *
1712 * Context:
1713 * queue_lock must be held.
1714 */
1715struct request *blk_peek_request(struct request_queue *q)
1716{
1717 struct request *rq;
1718 int ret;
1719
1720 while ((rq = __elv_next_request(q)) != NULL) {
1721 if (!(rq->cmd_flags & REQ_STARTED)) {
1722 /*
1723 * This is the first time the device driver
1724 * sees this request (possibly after
1725 * requeueing). Notify IO scheduler.
1726 */
1727 if (blk_sorted_rq(rq))
1728 elv_activate_rq(q, rq);
1729
1730 /*
1731 * just mark as started even if we don't start
1732 * it, a request that has been delayed should
1733 * not be passed by new incoming requests
1734 */
1735 rq->cmd_flags |= REQ_STARTED;
1736 trace_block_rq_issue(q, rq);
1737 }
1738
1739 if (!q->boundary_rq || q->boundary_rq == rq) {
1740 q->end_sector = rq_end_sector(rq);
1741 q->boundary_rq = NULL;
1742 }
1743
1744 if (rq->cmd_flags & REQ_DONTPREP)
1745 break;
1746
1747 if (q->dma_drain_size && blk_rq_bytes(rq)) {
1748 /*
1749 * make sure space for the drain appears we
1750 * know we can do this because max_hw_segments
1751 * has been adjusted to be one fewer than the
1752 * device can handle
1753 */
1754 rq->nr_phys_segments++;
1755 }
1756
1757 if (!q->prep_rq_fn)
1758 break;
1759
1760 ret = q->prep_rq_fn(q, rq);
1761 if (ret == BLKPREP_OK) {
1762 break;
1763 } else if (ret == BLKPREP_DEFER) {
1764 /*
1765 * the request may have been (partially) prepped.
1766 * we need to keep this request in the front to
1767 * avoid resource deadlock. REQ_STARTED will
1768 * prevent other fs requests from passing this one.
1769 */
1770 if (q->dma_drain_size && blk_rq_bytes(rq) &&
1771 !(rq->cmd_flags & REQ_DONTPREP)) {
1772 /*
1773 * remove the space for the drain we added
1774 * so that we don't add it again
1775 */
1776 --rq->nr_phys_segments;
1777 }
1778
1779 rq = NULL;
1780 break;
1781 } else if (ret == BLKPREP_KILL) {
1782 rq->cmd_flags |= REQ_QUIET;
1783 /*
1784 * Mark this request as started so we don't trigger
1785 * any debug logic in the end I/O path.
1786 */
1787 blk_start_request(rq);
1788 __blk_end_request_all(rq, -EIO);
1789 } else {
1790 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
1791 break;
1792 }
1793 }
1794
1795 return rq;
1796}
1797EXPORT_SYMBOL(blk_peek_request);
1798
1799void blk_dequeue_request(struct request *rq)
1800{
1801 struct request_queue *q = rq->q;
1802
1803 BUG_ON(list_empty(&rq->queuelist));
1804 BUG_ON(ELV_ON_HASH(rq));
1805
1806 list_del_init(&rq->queuelist);
1807
1808 /*
1809 * the time frame between a request being removed from the lists
1810 * and to it is freed is accounted as io that is in progress at
1811 * the driver side.
1812 */
1813 if (blk_account_rq(rq))
1814 q->in_flight[rq_is_sync(rq)]++;
1815}
1816
1817/**
1818 * blk_start_request - start request processing on the driver
1819 * @req: request to dequeue
1820 *
1821 * Description:
1822 * Dequeue @req and start timeout timer on it. This hands off the
1823 * request to the driver.
1824 *
1825 * Block internal functions which don't want to start timer should
1826 * call blk_dequeue_request().
1827 *
1828 * Context:
1829 * queue_lock must be held.
1830 */
1831void blk_start_request(struct request *req)
1832{
1833 blk_dequeue_request(req);
1834
1835 /*
1836 * We are now handing the request to the hardware, initialize
1837 * resid_len to full count and add the timeout handler.
1838 */
1839 req->resid_len = blk_rq_bytes(req);
1840 if (unlikely(blk_bidi_rq(req)))
1841 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
1842
1843 blk_add_timer(req);
1844}
1845EXPORT_SYMBOL(blk_start_request);
1846
1847/**
1848 * blk_fetch_request - fetch a request from a request queue
1849 * @q: request queue to fetch a request from
1850 *
1851 * Description:
1852 * Return the request at the top of @q. The request is started on
1853 * return and LLD can start processing it immediately.
1854 *
1855 * Return:
1856 * Pointer to the request at the top of @q if available. Null
1857 * otherwise.
1858 *
1859 * Context:
1860 * queue_lock must be held.
1861 */
1862struct request *blk_fetch_request(struct request_queue *q)
1863{
1864 struct request *rq;
1865
1866 rq = blk_peek_request(q);
1867 if (rq)
1868 blk_start_request(rq);
1869 return rq;
1870}
1871EXPORT_SYMBOL(blk_fetch_request);
1872
1873/**
1874 * blk_update_request - Special helper function for request stacking drivers
1875 * @rq: the request being processed
1715 * @error: %0 for success, < %0 for error 1876 * @error: %0 for success, < %0 for error
1716 * @nr_bytes: number of bytes to complete 1877 * @nr_bytes: number of bytes to complete @rq
1717 * 1878 *
1718 * Description: 1879 * Description:
1719 * Ends I/O on a number of bytes attached to @req, and sets it up 1880 * Ends I/O on a number of bytes attached to @rq, but doesn't complete
1720 * for the next range of segments (if any) in the cluster. 1881 * the request structure even if @rq doesn't have leftover.
1882 * If @rq has leftover, sets it up for the next range of segments.
1883 *
1884 * This special helper function is only for request stacking drivers
1885 * (e.g. request-based dm) so that they can handle partial completion.
1886 * Actual device drivers should use blk_end_request instead.
1887 *
1888 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1889 * %false return from this function.
1721 * 1890 *
1722 * Return: 1891 * Return:
1723 * %0 - we are done with this request, call end_that_request_last() 1892 * %false - this request doesn't have any more data
1724 * %1 - still buffers pending for this request 1893 * %true - this request has more data
1725 **/ 1894 **/
1726static int __end_that_request_first(struct request *req, int error, 1895bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
1727 int nr_bytes)
1728{ 1896{
1729 int total_bytes, bio_nbytes, next_idx = 0; 1897 int total_bytes, bio_nbytes, next_idx = 0;
1730 struct bio *bio; 1898 struct bio *bio;
1731 1899
1900 if (!req->bio)
1901 return false;
1902
1732 trace_block_rq_complete(req->q, req); 1903 trace_block_rq_complete(req->q, req);
1733 1904
1734 /* 1905 /*
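
blk_fetch_request() suits drivers that always accept the next request; drivers that need to check resource availability before committing can use the peek/start pair introduced in the hunk above. A hedged sketch, with my_resources_available() and my_dispatch() standing in for driver-specific logic:

static void my_careful_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (!my_resources_available(q->queuedata))
                        break;  /* leave the request queued, retry later */

                /* Commit to it: dequeue and start the timeout timer. */
                blk_start_request(rq);
                my_dispatch(q->queuedata, rq);
        }
}
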
@@ -1745,7 +1916,7 @@ static int __end_that_request_first(struct request *req, int error,
1745 if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { 1916 if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1746 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 1917 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1747 req->rq_disk ? req->rq_disk->disk_name : "?", 1918 req->rq_disk ? req->rq_disk->disk_name : "?",
1748 (unsigned long long)req->sector); 1919 (unsigned long long)blk_rq_pos(req));
1749 } 1920 }
1750 1921
1751 blk_account_io_completion(req, nr_bytes); 1922 blk_account_io_completion(req, nr_bytes);
@@ -1805,8 +1976,15 @@ static int __end_that_request_first(struct request *req, int error,
1805 /* 1976 /*
1806 * completely done 1977 * completely done
1807 */ 1978 */
1808 if (!req->bio) 1979 if (!req->bio) {
1809 return 0; 1980 /*
1981 * Reset counters so that the request stacking driver
1982 * can find how many bytes remain in the request
1983 * later.
1984 */
1985 req->__data_len = 0;
1986 return false;
1987 }
1810 1988
1811 /* 1989 /*
1812 * if the request wasn't completed, update state 1990 * if the request wasn't completed, update state
@@ -1818,21 +1996,55 @@ static int __end_that_request_first(struct request *req, int error,
1818 bio_iovec(bio)->bv_len -= nr_bytes; 1996 bio_iovec(bio)->bv_len -= nr_bytes;
1819 } 1997 }
1820 1998
1821 blk_recalc_rq_sectors(req, total_bytes >> 9); 1999 req->__data_len -= total_bytes;
2000 req->buffer = bio_data(req->bio);
2001
2002 /* update sector only for requests with clear definition of sector */
2003 if (blk_fs_request(req) || blk_discard_rq(req))
2004 req->__sector += total_bytes >> 9;
2005
2006 /*
2007 * If total number of sectors is less than the first segment
2008 * size, something has gone terribly wrong.
2009 */
2010 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2011 printk(KERN_ERR "blk: request botched\n");
2012 req->__data_len = blk_rq_cur_bytes(req);
2013 }
2014
2015 /* recalculate the number of segments */
1822 blk_recalc_rq_segments(req); 2016 blk_recalc_rq_segments(req);
1823 return 1; 2017
2018 return true;
2019}
2020EXPORT_SYMBOL_GPL(blk_update_request);
2021
2022static bool blk_update_bidi_request(struct request *rq, int error,
2023 unsigned int nr_bytes,
2024 unsigned int bidi_bytes)
2025{
2026 if (blk_update_request(rq, error, nr_bytes))
2027 return true;
2028
2029 /* Bidi request must be completed as a whole */
2030 if (unlikely(blk_bidi_rq(rq)) &&
2031 blk_update_request(rq->next_rq, error, bidi_bytes))
2032 return true;
2033
2034 add_disk_randomness(rq->rq_disk);
2035
2036 return false;
1824} 2037}
1825 2038
1826/* 2039/*
1827 * queue lock must be held 2040 * queue lock must be held
1828 */ 2041 */
1829static void end_that_request_last(struct request *req, int error) 2042static void blk_finish_request(struct request *req, int error)
1830{ 2043{
1831 if (blk_rq_tagged(req)) 2044 if (blk_rq_tagged(req))
1832 blk_queue_end_tag(req->q, req); 2045 blk_queue_end_tag(req->q, req);
1833 2046
1834 if (blk_queued_rq(req)) 2047 BUG_ON(blk_queued_rq(req));
1835 elv_dequeue_request(req->q, req);
1836 2048
1837 if (unlikely(laptop_mode) && blk_fs_request(req)) 2049 if (unlikely(laptop_mode) && blk_fs_request(req))
1838 laptop_io_completion(); 2050 laptop_io_completion();
@@ -1852,117 +2064,62 @@ static void end_that_request_last(struct request *req, int error)
1852} 2064}
1853 2065
1854/** 2066/**
1855 * blk_rq_bytes - Returns bytes left to complete in the entire request 2067 * blk_end_bidi_request - Complete a bidi request
1856 * @rq: the request being processed 2068 * @rq: the request to complete
1857 **/ 2069 * @error: %0 for success, < %0 for error
1858unsigned int blk_rq_bytes(struct request *rq) 2070 * @nr_bytes: number of bytes to complete @rq
1859{ 2071 * @bidi_bytes: number of bytes to complete @rq->next_rq
1860 if (blk_fs_request(rq))
1861 return rq->hard_nr_sectors << 9;
1862
1863 return rq->data_len;
1864}
1865EXPORT_SYMBOL_GPL(blk_rq_bytes);
1866
1867/**
1868 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1869 * @rq: the request being processed
1870 **/
1871unsigned int blk_rq_cur_bytes(struct request *rq)
1872{
1873 if (blk_fs_request(rq))
1874 return rq->current_nr_sectors << 9;
1875
1876 if (rq->bio)
1877 return rq->bio->bi_size;
1878
1879 return rq->data_len;
1880}
1881EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1882
1883/**
1884 * end_request - end I/O on the current segment of the request
1885 * @req: the request being processed
1886 * @uptodate: error value or %0/%1 uptodate flag
1887 * 2072 *
1888 * Description: 2073 * Description:
1889 * Ends I/O on the current segment of a request. If that is the only 2074 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1890 * remaining segment, the request is also completed and freed. 2075 * Drivers that support bidi can safely call this function for any
1891 * 2076 * type of request, bidi or uni. In the latter case @bidi_bytes is
1892 * This is a remnant of how older block drivers handled I/O completions. 2077 * just ignored.
1893 * Modern drivers typically end I/O on the full request in one go, unless 2078 *
1894 * they have a residual value to account for. For that case this function 2079 * Return:
1895 * isn't really useful, unless the residual just happens to be the 2080 * %false - we are done with this request
1896 * full current segment. In other words, don't use this function in new 2081 * %true - still buffers pending for this request
1897 * code. Use blk_end_request() or __blk_end_request() to end a request.
1898 **/ 2082 **/
1899void end_request(struct request *req, int uptodate) 2083static bool blk_end_bidi_request(struct request *rq, int error,
1900{
1901 int error = 0;
1902
1903 if (uptodate <= 0)
1904 error = uptodate ? uptodate : -EIO;
1905
1906 __blk_end_request(req, error, req->hard_cur_sectors << 9);
1907}
1908EXPORT_SYMBOL(end_request);
1909
1910static int end_that_request_data(struct request *rq, int error,
1911 unsigned int nr_bytes, unsigned int bidi_bytes) 2084 unsigned int nr_bytes, unsigned int bidi_bytes)
1912{ 2085{
1913 if (rq->bio) { 2086 struct request_queue *q = rq->q;
1914 if (__end_that_request_first(rq, error, nr_bytes)) 2087 unsigned long flags;
1915 return 1;
1916 2088
1917 /* Bidi request must be completed as a whole */ 2089 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
1918 if (blk_bidi_rq(rq) && 2090 return true;
1919 __end_that_request_first(rq->next_rq, error, bidi_bytes))
1920 return 1;
1921 }
1922 2091
1923 return 0; 2092 spin_lock_irqsave(q->queue_lock, flags);
2093 blk_finish_request(rq, error);
2094 spin_unlock_irqrestore(q->queue_lock, flags);
2095
2096 return false;
1924} 2097}
1925 2098
1926/** 2099/**
1927 * blk_end_io - Generic end_io function to complete a request. 2100 * __blk_end_bidi_request - Complete a bidi request with queue lock held
1928 * @rq: the request being processed 2101 * @rq: the request to complete
1929 * @error: %0 for success, < %0 for error 2102 * @error: %0 for success, < %0 for error
1930 * @nr_bytes: number of bytes to complete @rq 2103 * @nr_bytes: number of bytes to complete @rq
1931 * @bidi_bytes: number of bytes to complete @rq->next_rq 2104 * @bidi_bytes: number of bytes to complete @rq->next_rq
1932 * @drv_callback: function called between completion of bios in the request
1933 * and completion of the request.
1934 * If the callback returns non %0, this helper returns without
1935 * completion of the request.
1936 * 2105 *
1937 * Description: 2106 * Description:
1938 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2107 * Identical to blk_end_bidi_request() except that queue lock is
1939 * If @rq has leftover, sets it up for the next range of segments. 2108 * assumed to be locked on entry and remains so on return.
1940 * 2109 *
1941 * Return: 2110 * Return:
1942 * %0 - we are done with this request 2111 * %false - we are done with this request
1943 * %1 - this request is not freed yet, it still has pending buffers. 2112 * %true - still buffers pending for this request
1944 **/ 2113 **/
1945static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, 2114static bool __blk_end_bidi_request(struct request *rq, int error,
1946 unsigned int bidi_bytes, 2115 unsigned int nr_bytes, unsigned int bidi_bytes)
1947 int (drv_callback)(struct request *))
1948{ 2116{
1949 struct request_queue *q = rq->q; 2117 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
1950 unsigned long flags = 0UL; 2118 return true;
1951
1952 if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
1953 return 1;
1954
1955 /* Special feature for tricky drivers */
1956 if (drv_callback && drv_callback(rq))
1957 return 1;
1958
1959 add_disk_randomness(rq->rq_disk);
1960 2119
1961 spin_lock_irqsave(q->queue_lock, flags); 2120 blk_finish_request(rq, error);
1962 end_that_request_last(rq, error);
1963 spin_unlock_irqrestore(q->queue_lock, flags);
1964 2121
1965 return 0; 2122 return false;
1966} 2123}
1967 2124
1968/** 2125/**
@@ -1976,124 +2133,112 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1976 * If @rq has leftover, sets it up for the next range of segments. 2133 * If @rq has leftover, sets it up for the next range of segments.
1977 * 2134 *
1978 * Return: 2135 * Return:
1979 * %0 - we are done with this request 2136 * %false - we are done with this request
1980 * %1 - still buffers pending for this request 2137 * %true - still buffers pending for this request
1981 **/ 2138 **/
1982int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2139bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1983{ 2140{
1984 return blk_end_io(rq, error, nr_bytes, 0, NULL); 2141 return blk_end_bidi_request(rq, error, nr_bytes, 0);
1985} 2142}
1986EXPORT_SYMBOL_GPL(blk_end_request); 2143EXPORT_SYMBOL_GPL(blk_end_request);
1987 2144
1988/** 2145/**
1989 * __blk_end_request - Helper function for drivers to complete the request. 2146 * blk_end_request_all - Helper function for drivers to finish the request.
1990 * @rq: the request being processed 2147 * @rq: the request to finish
1991 * @error: %0 for success, < %0 for error 2148 * @error: %0 for success, < %0 for error
1992 * @nr_bytes: number of bytes to complete
1993 * 2149 *
1994 * Description: 2150 * Description:
1995 * Must be called with queue lock held unlike blk_end_request(). 2151 * Completely finish @rq.
1996 * 2152 */
1997 * Return: 2153void blk_end_request_all(struct request *rq, int error)
1998 * %0 - we are done with this request
1999 * %1 - still buffers pending for this request
2000 **/
2001int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2002{ 2154{
2003 if (rq->bio && __end_that_request_first(rq, error, nr_bytes)) 2155 bool pending;
2004 return 1; 2156 unsigned int bidi_bytes = 0;
2005 2157
2006 add_disk_randomness(rq->rq_disk); 2158 if (unlikely(blk_bidi_rq(rq)))
2159 bidi_bytes = blk_rq_bytes(rq->next_rq);
2007 2160
2008 end_that_request_last(rq, error); 2161 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2162 BUG_ON(pending);
2163}
2164EXPORT_SYMBOL_GPL(blk_end_request_all);
2009 2165
2010 return 0; 2166/**
2167 * blk_end_request_cur - Helper function to finish the current request chunk.
2168 * @rq: the request to finish the current chunk for
2169 * @error: %0 for success, < %0 for error
2170 *
2171 * Description:
2172 * Complete the current consecutively mapped chunk from @rq.
2173 *
2174 * Return:
2175 * %false - we are done with this request
2176 * %true - still buffers pending for this request
2177 */
2178bool blk_end_request_cur(struct request *rq, int error)
2179{
2180 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2011} 2181}
2012EXPORT_SYMBOL_GPL(__blk_end_request); 2182EXPORT_SYMBOL_GPL(blk_end_request_cur);
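A short sketch (not from the patch) of the unlocked completion helpers as a driver's completion path might call them; the foo_* names are hypothetical.

#include <linux/blkdev.h>

/* Sketch: completion without the queue lock held. */
static void foo_io_done(struct request *rq, int error,
			unsigned int bytes_done)
{
	if (blk_end_request(rq, error, bytes_done))
		return;		/* partial completion, rq still alive */
	/* rq has been finished and freed at this point */
}

/* Devices that always complete a request in one go: */
static void foo_io_done_whole(struct request *rq, int error)
{
	blk_end_request_all(rq, error);
}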
2013 2183
2014/** 2184/**
2015 * blk_end_bidi_request - Helper function for drivers to complete bidi request. 2185 * __blk_end_request - Helper function for drivers to complete the request.
2016 * @rq: the bidi request being processed 2186 * @rq: the request being processed
2017 * @error: %0 for success, < %0 for error 2187 * @error: %0 for success, < %0 for error
2018 * @nr_bytes: number of bytes to complete @rq 2188 * @nr_bytes: number of bytes to complete
2019 * @bidi_bytes: number of bytes to complete @rq->next_rq
2020 * 2189 *
2021 * Description: 2190 * Description:
2022 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2191 * Must be called with queue lock held unlike blk_end_request().
2023 * 2192 *
2024 * Return: 2193 * Return:
2025 * %0 - we are done with this request 2194 * %false - we are done with this request
2026 * %1 - still buffers pending for this request 2195 * %true - still buffers pending for this request
2027 **/ 2196 **/
2028int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, 2197bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2029 unsigned int bidi_bytes)
2030{ 2198{
2031 return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); 2199 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2032} 2200}
2033EXPORT_SYMBOL_GPL(blk_end_bidi_request); 2201EXPORT_SYMBOL_GPL(__blk_end_request);
2034 2202
2035/** 2203/**
2036 * blk_update_request - Special helper function for request stacking drivers 2204 * __blk_end_request_all - Helper function for drivers to finish the request.
2037 * @rq: the request being processed 2205 * @rq: the request to finish
2038 * @error: %0 for success, < %0 for error 2206 * @error: %0 for success, < %0 for error
2039 * @nr_bytes: number of bytes to complete @rq
2040 * 2207 *
2041 * Description: 2208 * Description:
2042 * Ends I/O on a number of bytes attached to @rq, but doesn't complete 2209 * Completely finish @rq. Must be called with queue lock held.
2043 * the request structure even if @rq doesn't have leftover.
2044 * If @rq has leftover, sets it up for the next range of segments.
2045 *
2046 * This special helper function is only for request stacking drivers
2047 * (e.g. request-based dm) so that they can handle partial completion.
2048 * Actual device drivers should use blk_end_request instead.
2049 */ 2210 */
2050void blk_update_request(struct request *rq, int error, unsigned int nr_bytes) 2211void __blk_end_request_all(struct request *rq, int error)
2051{ 2212{
2052 if (!end_that_request_data(rq, error, nr_bytes, 0)) { 2213 bool pending;
2053 /* 2214 unsigned int bidi_bytes = 0;
2054 * These members are not updated in end_that_request_data() 2215
2055 * when all bios are completed. 2216 if (unlikely(blk_bidi_rq(rq)))
2056 * Update them so that the request stacking driver can find 2217 bidi_bytes = blk_rq_bytes(rq->next_rq);
2057 * how many bytes remain in the request later. 2218
2058 */ 2219 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2059 rq->nr_sectors = rq->hard_nr_sectors = 0; 2220 BUG_ON(pending);
2060 rq->current_nr_sectors = rq->hard_cur_sectors = 0;
2061 }
2062} 2221}
2063EXPORT_SYMBOL_GPL(blk_update_request); 2222EXPORT_SYMBOL_GPL(__blk_end_request_all);
2064 2223
2065/** 2224/**
2066 * blk_end_request_callback - Special helper function for tricky drivers 2225 * __blk_end_request_cur - Helper function to finish the current request chunk.
2067 * @rq: the request being processed 2226 * @rq: the request to finish the current chunk for
2068 * @error: %0 for success, < %0 for error 2227 * @error: %0 for success, < %0 for error
2069 * @nr_bytes: number of bytes to complete
2070 * @drv_callback: function called between completion of bios in the request
2071 * and completion of the request.
2072 * If the callback returns non %0, this helper returns without
2073 * completion of the request.
2074 * 2228 *
2075 * Description: 2229 * Description:
2076 * Ends I/O on a number of bytes attached to @rq. 2230 * Complete the current consecutively mapped chunk from @rq. Must
2077 * If @rq has leftover, sets it up for the next range of segments. 2231 * be called with queue lock held.
2078 *
2079 * This special helper function is used only for existing tricky drivers.
2080 * (e.g. cdrom_newpc_intr() of ide-cd)
2081 * This interface will be removed when such drivers are rewritten.
2082 * Don't use this interface in other places anymore.
2083 * 2232 *
2084 * Return: 2233 * Return:
2085 * %0 - we are done with this request 2234 * %false - we are done with this request
2086 * %1 - this request is not freed yet. 2235 * %true - still buffers pending for this request
2087 * this request still has pending buffers or 2236 */
2088 * the driver doesn't want to finish this request yet. 2237bool __blk_end_request_cur(struct request *rq, int error)
2089 **/
2090int blk_end_request_callback(struct request *rq, int error,
2091 unsigned int nr_bytes,
2092 int (drv_callback)(struct request *))
2093{ 2238{
2094 return blk_end_io(rq, error, nr_bytes, 0, drv_callback); 2239 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2095} 2240}
2096EXPORT_SYMBOL_GPL(blk_end_request_callback); 2241EXPORT_SYMBOL_GPL(__blk_end_request_cur);
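Another illustrative sketch (hypothetical driver, queue lock held): chunk-at-a-time completion with the locked helpers, using the blk_rq_pos()/blk_rq_cur_bytes() accessors introduced by this series.

#include <linux/blkdev.h>

/* Hypothetical hardware programming helper, stubbed for the sketch. */
static void foo_start_transfer(sector_t pos, void *buf, unsigned int len)
{
	/* program the device for the next chunk here */
}

/* Sketch: called with q->queue_lock held after a chunk has finished. */
static void foo_chunk_done(struct request *rq, int error)
{
	if (__blk_end_request_cur(rq, error))
		/* more chunks remain; set up the next one */
		foo_start_transfer(blk_rq_pos(rq), rq->buffer,
				   blk_rq_cur_bytes(rq));
}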
2097 2242
2098void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2243void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2099 struct bio *bio) 2244 struct bio *bio)
@@ -2106,11 +2251,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2106 rq->nr_phys_segments = bio_phys_segments(q, bio); 2251 rq->nr_phys_segments = bio_phys_segments(q, bio);
2107 rq->buffer = bio_data(bio); 2252 rq->buffer = bio_data(bio);
2108 } 2253 }
2109 rq->current_nr_sectors = bio_cur_sectors(bio); 2254 rq->__data_len = bio->bi_size;
2110 rq->hard_cur_sectors = rq->current_nr_sectors;
2111 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2112 rq->data_len = bio->bi_size;
2113
2114 rq->bio = rq->biotail = bio; 2255 rq->bio = rq->biotail = bio;
2115 2256
2116 if (bio->bi_bdev) 2257 if (bio->bi_bdev)
@@ -2145,6 +2286,106 @@ int blk_lld_busy(struct request_queue *q)
2145} 2286}
2146EXPORT_SYMBOL_GPL(blk_lld_busy); 2287EXPORT_SYMBOL_GPL(blk_lld_busy);
2147 2288
2289/**
2290 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2291 * @rq: the clone request to be cleaned up
2292 *
2293 * Description:
2294 * Free all bios in @rq for a cloned request.
2295 */
2296void blk_rq_unprep_clone(struct request *rq)
2297{
2298 struct bio *bio;
2299
2300 while ((bio = rq->bio) != NULL) {
2301 rq->bio = bio->bi_next;
2302
2303 bio_put(bio);
2304 }
2305}
2306EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2307
2308/*
2309 * Copy attributes of the original request to the clone request.
2310 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2311 */
2312static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2313{
2314 dst->cpu = src->cpu;
2315 dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
2316 dst->cmd_type = src->cmd_type;
2317 dst->__sector = blk_rq_pos(src);
2318 dst->__data_len = blk_rq_bytes(src);
2319 dst->nr_phys_segments = src->nr_phys_segments;
2320 dst->ioprio = src->ioprio;
2321 dst->extra_len = src->extra_len;
2322}
2323
2324/**
2325 * blk_rq_prep_clone - Helper function to setup clone request
2326 * @rq: the request to be setup
2327 * @rq_src: original request to be cloned
2328 * @bs: bio_set that bios for clone are allocated from
2329 * @gfp_mask: memory allocation mask for bio
2330 * @bio_ctr: setup function to be called for each clone bio.
2331 * Returns %0 for success, non %0 for failure.
2332 * @data: private data to be passed to @bio_ctr
2333 *
2334 * Description:
2335 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2336 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2337 * are not copied, and copying such parts is the caller's responsibility.
2338 * Also, pages which the original bios are pointing to are not copied
2339 * and the cloned bios just point to the same pages.
2340 * So cloned bios must be completed before original bios, which means
2341 * the caller must complete @rq before @rq_src.
2342 */
2343int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2344 struct bio_set *bs, gfp_t gfp_mask,
2345 int (*bio_ctr)(struct bio *, struct bio *, void *),
2346 void *data)
2347{
2348 struct bio *bio, *bio_src;
2349
2350 if (!bs)
2351 bs = fs_bio_set;
2352
2353 blk_rq_init(NULL, rq);
2354
2355 __rq_for_each_bio(bio_src, rq_src) {
2356 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2357 if (!bio)
2358 goto free_and_out;
2359
2360 __bio_clone(bio, bio_src);
2361
2362 if (bio_integrity(bio_src) &&
2363 bio_integrity_clone(bio, bio_src, gfp_mask))
2364 goto free_and_out;
2365
2366 if (bio_ctr && bio_ctr(bio, bio_src, data))
2367 goto free_and_out;
2368
2369 if (rq->bio) {
2370 rq->biotail->bi_next = bio;
2371 rq->biotail = bio;
2372 } else
2373 rq->bio = rq->biotail = bio;
2374 }
2375
2376 __blk_rq_prep_clone(rq, rq_src);
2377
2378 return 0;
2379
2380free_and_out:
2381 if (bio)
2382 bio_free(bio, bs);
2383 blk_rq_unprep_clone(rq);
2384
2385 return -ENOMEM;
2386}
2387EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
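A minimal sketch of how a stacking driver might drive the new clone helpers, assuming the clone request and bio_set come from the caller and no per-bio constructor is needed.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Sketch: prepare a clone of @orig for dispatch to a lower device. */
static int foo_setup_clone(struct request *clone, struct request *orig,
			   struct bio_set *bs)
{
	int r;

	r = blk_rq_prep_clone(clone, orig, bs, GFP_ATOMIC, NULL, NULL);
	if (r)
		return r;	/* -ENOMEM, nothing to undo */

	return 0;
}

/* Sketch: if dispatch fails, drop the cloned bios again. */
static void foo_teardown_clone(struct request *clone)
{
	blk_rq_unprep_clone(clone);
}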
2388
2148int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) 2389int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2149{ 2390{
2150 return queue_work(kblockd_workqueue, work); 2391 return queue_work(kblockd_workqueue, work);
@@ -2153,6 +2394,9 @@ EXPORT_SYMBOL(kblockd_schedule_work);
2153 2394
2154int __init blk_dev_init(void) 2395int __init blk_dev_init(void)
2155{ 2396{
2397 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2398 sizeof(((struct request *)0)->cmd_flags));
2399
2156 kblockd_workqueue = create_workqueue("kblockd"); 2400 kblockd_workqueue = create_workqueue("kblockd");
2157 if (!kblockd_workqueue) 2401 if (!kblockd_workqueue)
2158 panic("Failed to create kblockd\n"); 2402 panic("Failed to create kblockd\n");
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 6af716d1e54e..49557e91f0da 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
51 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 51 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
52 52
53 rq->rq_disk = bd_disk; 53 rq->rq_disk = bd_disk;
54 rq->cmd_flags |= REQ_NOMERGE;
55 rq->end_io = done; 54 rq->end_io = done;
56 WARN_ON(irqs_disabled()); 55 WARN_ON(irqs_disabled());
57 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 91fa8e06b6a5..73e28d355688 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -340,7 +340,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
340 kobject_uevent(&bi->kobj, KOBJ_ADD); 340 kobject_uevent(&bi->kobj, KOBJ_ADD);
341 341
342 bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE; 342 bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE;
343 bi->sector_size = disk->queue->hardsect_size; 343 bi->sector_size = queue_logical_block_size(disk->queue);
344 disk->integrity = bi; 344 disk->integrity = bi;
345 } else 345 } else
346 bi = disk->integrity; 346 bi = disk->integrity;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 012f065ac8e2..d4ed6000147d 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -35,9 +35,9 @@ int put_io_context(struct io_context *ioc)
35 if (ioc == NULL) 35 if (ioc == NULL)
36 return 1; 36 return 1;
37 37
38 BUG_ON(atomic_read(&ioc->refcount) == 0); 38 BUG_ON(atomic_long_read(&ioc->refcount) == 0);
39 39
40 if (atomic_dec_and_test(&ioc->refcount)) { 40 if (atomic_long_dec_and_test(&ioc->refcount)) {
41 rcu_read_lock(); 41 rcu_read_lock();
42 if (ioc->aic && ioc->aic->dtor) 42 if (ioc->aic && ioc->aic->dtor)
43 ioc->aic->dtor(ioc->aic); 43 ioc->aic->dtor(ioc->aic);
@@ -90,7 +90,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
90 90
91 ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); 91 ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
92 if (ret) { 92 if (ret) {
93 atomic_set(&ret->refcount, 1); 93 atomic_long_set(&ret->refcount, 1);
94 atomic_set(&ret->nr_tasks, 1); 94 atomic_set(&ret->nr_tasks, 1);
95 spin_lock_init(&ret->lock); 95 spin_lock_init(&ret->lock);
96 ret->ioprio_changed = 0; 96 ret->ioprio_changed = 0;
@@ -151,7 +151,7 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
151 ret = current_io_context(gfp_flags, node); 151 ret = current_io_context(gfp_flags, node);
152 if (unlikely(!ret)) 152 if (unlikely(!ret))
153 break; 153 break;
154 } while (!atomic_inc_not_zero(&ret->refcount)); 154 } while (!atomic_long_inc_not_zero(&ret->refcount));
155 155
156 return ret; 156 return ret;
157} 157}
@@ -163,8 +163,8 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
163 struct io_context *dst = *pdst; 163 struct io_context *dst = *pdst;
164 164
165 if (src) { 165 if (src) {
166 BUG_ON(atomic_read(&src->refcount) == 0); 166 BUG_ON(atomic_long_read(&src->refcount) == 0);
167 atomic_inc(&src->refcount); 167 atomic_long_inc(&src->refcount);
168 put_io_context(dst); 168 put_io_context(dst);
169 *pdst = src; 169 *pdst = src;
170 } 170 }
diff --git a/block/blk-map.c b/block/blk-map.c
index f103729b462f..9083cf0180cc 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
20 rq->biotail->bi_next = bio; 20 rq->biotail->bi_next = bio;
21 rq->biotail = bio; 21 rq->biotail = bio;
22 22
23 rq->data_len += bio->bi_size; 23 rq->__data_len += bio->bi_size;
24 } 24 }
25 return 0; 25 return 0;
26} 26}
27EXPORT_SYMBOL(blk_rq_append_bio);
28 27
29static int __blk_rq_unmap_user(struct bio *bio) 28static int __blk_rq_unmap_user(struct bio *bio)
30{ 29{
@@ -116,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
116 struct bio *bio = NULL; 115 struct bio *bio = NULL;
117 int ret; 116 int ret;
118 117
119 if (len > (q->max_hw_sectors << 9)) 118 if (len > (queue_max_hw_sectors(q) << 9))
120 return -EINVAL; 119 return -EINVAL;
121 if (!len) 120 if (!len)
122 return -EINVAL; 121 return -EINVAL;
@@ -156,7 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
156 if (!bio_flagged(bio, BIO_USER_MAPPED)) 155 if (!bio_flagged(bio, BIO_USER_MAPPED))
157 rq->cmd_flags |= REQ_COPY_USER; 156 rq->cmd_flags |= REQ_COPY_USER;
158 157
159 rq->buffer = rq->data = NULL; 158 rq->buffer = NULL;
160 return 0; 159 return 0;
161unmap_rq: 160unmap_rq:
162 blk_rq_unmap_user(bio); 161 blk_rq_unmap_user(bio);
@@ -235,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
235 blk_queue_bounce(q, &bio); 234 blk_queue_bounce(q, &bio);
236 bio_get(bio); 235 bio_get(bio);
237 blk_rq_bio_prep(q, rq, bio); 236 blk_rq_bio_prep(q, rq, bio);
238 rq->buffer = rq->data = NULL; 237 rq->buffer = NULL;
239 return 0; 238 return 0;
240} 239}
241EXPORT_SYMBOL(blk_rq_map_user_iov); 240EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -282,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
282 * 281 *
283 * Description: 282 * Description:
284 * Data will be mapped directly if possible. Otherwise a bounce 283 * Data will be mapped directly if possible. Otherwise a bounce
285 * buffer is used. 284 * buffer is used. Can be called multiple times to append multiple
285 * buffers.
286 */ 286 */
287int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, 287int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
288 unsigned int len, gfp_t gfp_mask) 288 unsigned int len, gfp_t gfp_mask)
@@ -290,8 +290,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
290 int reading = rq_data_dir(rq) == READ; 290 int reading = rq_data_dir(rq) == READ;
291 int do_copy = 0; 291 int do_copy = 0;
292 struct bio *bio; 292 struct bio *bio;
293 int ret;
293 294
294 if (len > (q->max_hw_sectors << 9)) 295 if (len > (queue_max_hw_sectors(q) << 9))
295 return -EINVAL; 296 return -EINVAL;
296 if (!len || !kbuf) 297 if (!len || !kbuf)
297 return -EINVAL; 298 return -EINVAL;
@@ -311,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
311 if (do_copy) 312 if (do_copy)
312 rq->cmd_flags |= REQ_COPY_USER; 313 rq->cmd_flags |= REQ_COPY_USER;
313 314
314 blk_rq_bio_prep(q, rq, bio); 315 ret = blk_rq_append_bio(q, rq, bio);
316 if (unlikely(ret)) {
317 /* request is too big */
318 bio_put(bio);
319 return ret;
320 }
321
315 blk_queue_bounce(q, &rq->bio); 322 blk_queue_bounce(q, &rq->bio);
316 rq->buffer = rq->data = NULL; 323 rq->buffer = NULL;
317 return 0; 324 return 0;
318} 325}
319EXPORT_SYMBOL(blk_rq_map_kern); 326EXPORT_SYMBOL(blk_rq_map_kern);
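To illustrate the amended kernel-doc above ("can be called multiple times"), a hedged sketch of mapping two kernel buffers into one BLOCK_PC request; blk_get_request()/blk_put_request() are the usual request allocation helpers of this era, and the buffers and the foo_* name are hypothetical.

#include <linux/blkdev.h>

static int foo_map_two_buffers(struct request_queue *q,
			       void *hdr, unsigned int hdr_len,
			       void *data, unsigned int data_len,
			       struct request **rqp)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* each call appends one more bio to the request */
	ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_KERNEL);
	if (!ret)
		ret = blk_rq_map_kern(q, rq, data, data_len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	*rqp = rq;	/* caller fills rq->cmd[] and executes it */
	return 0;
}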
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 23d2a6fe34a3..39ce64432ba6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,35 +9,6 @@
9 9
10#include "blk.h" 10#include "blk.h"
11 11
12void blk_recalc_rq_sectors(struct request *rq, int nsect)
13{
14 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
15 rq->hard_sector += nsect;
16 rq->hard_nr_sectors -= nsect;
17
18 /*
19 * Move the I/O submission pointers ahead if required.
20 */
21 if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
22 (rq->sector <= rq->hard_sector)) {
23 rq->sector = rq->hard_sector;
24 rq->nr_sectors = rq->hard_nr_sectors;
25 rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
26 rq->current_nr_sectors = rq->hard_cur_sectors;
27 rq->buffer = bio_data(rq->bio);
28 }
29
30 /*
31 * if total number of sectors is less than the first segment
32 * size, something has gone terribly wrong
33 */
34 if (rq->nr_sectors < rq->current_nr_sectors) {
35 printk(KERN_ERR "blk: request botched\n");
36 rq->nr_sectors = rq->current_nr_sectors;
37 }
38 }
39}
40
41static unsigned int __blk_recalc_rq_segments(struct request_queue *q, 12static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
42 struct bio *bio) 13 struct bio *bio)
43{ 14{
@@ -61,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
61 * never considered part of another segment, since that 32 * never considered part of another segment, since that
62 * might change with the bounce page. 33 * might change with the bounce page.
63 */ 34 */
64 high = page_to_pfn(bv->bv_page) > q->bounce_pfn; 35 high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
65 if (high || highprv) 36 if (high || highprv)
66 goto new_segment; 37 goto new_segment;
67 if (cluster) { 38 if (cluster) {
68 if (seg_size + bv->bv_len > q->max_segment_size) 39 if (seg_size + bv->bv_len
40 > queue_max_segment_size(q))
69 goto new_segment; 41 goto new_segment;
70 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) 42 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
71 goto new_segment; 43 goto new_segment;
@@ -120,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
120 return 0; 92 return 0;
121 93
122 if (bio->bi_seg_back_size + nxt->bi_seg_front_size > 94 if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
123 q->max_segment_size) 95 queue_max_segment_size(q))
124 return 0; 96 return 0;
125 97
126 if (!bio_has_data(bio)) 98 if (!bio_has_data(bio))
@@ -163,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
163 int nbytes = bvec->bv_len; 135 int nbytes = bvec->bv_len;
164 136
165 if (bvprv && cluster) { 137 if (bvprv && cluster) {
166 if (sg->length + nbytes > q->max_segment_size) 138 if (sg->length + nbytes > queue_max_segment_size(q))
167 goto new_segment; 139 goto new_segment;
168 140
169 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) 141 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -199,8 +171,9 @@ new_segment:
199 171
200 172
201 if (unlikely(rq->cmd_flags & REQ_COPY_USER) && 173 if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
202 (rq->data_len & q->dma_pad_mask)) { 174 (blk_rq_bytes(rq) & q->dma_pad_mask)) {
203 unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1; 175 unsigned int pad_len =
176 (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
204 177
205 sg->length += pad_len; 178 sg->length += pad_len;
206 rq->extra_len += pad_len; 179 rq->extra_len += pad_len;
@@ -233,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
233{ 206{
234 int nr_phys_segs = bio_phys_segments(q, bio); 207 int nr_phys_segs = bio_phys_segments(q, bio);
235 208
236 if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments 209 if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
237 || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { 210 req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
238 req->cmd_flags |= REQ_NOMERGE; 211 req->cmd_flags |= REQ_NOMERGE;
239 if (req == q->last_merge) 212 if (req == q->last_merge)
240 q->last_merge = NULL; 213 q->last_merge = NULL;
@@ -255,11 +228,11 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
255 unsigned short max_sectors; 228 unsigned short max_sectors;
256 229
257 if (unlikely(blk_pc_request(req))) 230 if (unlikely(blk_pc_request(req)))
258 max_sectors = q->max_hw_sectors; 231 max_sectors = queue_max_hw_sectors(q);
259 else 232 else
260 max_sectors = q->max_sectors; 233 max_sectors = queue_max_sectors(q);
261 234
262 if (req->nr_sectors + bio_sectors(bio) > max_sectors) { 235 if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
263 req->cmd_flags |= REQ_NOMERGE; 236 req->cmd_flags |= REQ_NOMERGE;
264 if (req == q->last_merge) 237 if (req == q->last_merge)
265 q->last_merge = NULL; 238 q->last_merge = NULL;
@@ -279,12 +252,12 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
279 unsigned short max_sectors; 252 unsigned short max_sectors;
280 253
281 if (unlikely(blk_pc_request(req))) 254 if (unlikely(blk_pc_request(req)))
282 max_sectors = q->max_hw_sectors; 255 max_sectors = queue_max_hw_sectors(q);
283 else 256 else
284 max_sectors = q->max_sectors; 257 max_sectors = queue_max_sectors(q);
285 258
286 259
287 if (req->nr_sectors + bio_sectors(bio) > max_sectors) { 260 if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
288 req->cmd_flags |= REQ_NOMERGE; 261 req->cmd_flags |= REQ_NOMERGE;
289 if (req == q->last_merge) 262 if (req == q->last_merge)
290 q->last_merge = NULL; 263 q->last_merge = NULL;
@@ -315,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
315 /* 288 /*
316 * Will it become too large? 289 * Will it become too large?
317 */ 290 */
318 if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) 291 if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
319 return 0; 292 return 0;
320 293
321 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; 294 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -327,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
327 total_phys_segments--; 300 total_phys_segments--;
328 } 301 }
329 302
330 if (total_phys_segments > q->max_phys_segments) 303 if (total_phys_segments > queue_max_phys_segments(q))
331 return 0; 304 return 0;
332 305
333 if (total_phys_segments > q->max_hw_segments) 306 if (total_phys_segments > queue_max_hw_segments(q))
334 return 0; 307 return 0;
335 308
336 /* Merge is OK... */ 309 /* Merge is OK... */
@@ -345,7 +318,7 @@ static void blk_account_io_merge(struct request *req)
345 int cpu; 318 int cpu;
346 319
347 cpu = part_stat_lock(); 320 cpu = part_stat_lock();
348 part = disk_map_sector_rcu(req->rq_disk, req->sector); 321 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
349 322
350 part_round_stats(cpu, part); 323 part_round_stats(cpu, part);
351 part_dec_in_flight(part); 324 part_dec_in_flight(part);
@@ -366,7 +339,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
366 /* 339 /*
367 * not contiguous 340 * not contiguous
368 */ 341 */
369 if (req->sector + req->nr_sectors != next->sector) 342 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
370 return 0; 343 return 0;
371 344
372 if (rq_data_dir(req) != rq_data_dir(next) 345 if (rq_data_dir(req) != rq_data_dir(next)
@@ -398,7 +371,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
398 req->biotail->bi_next = next->bio; 371 req->biotail->bi_next = next->bio;
399 req->biotail = next->biotail; 372 req->biotail = next->biotail;
400 373
401 req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; 374 req->__data_len += blk_rq_bytes(next);
402 375
403 elv_merge_requests(q, req, next); 376 elv_merge_requests(q, req, next);
404 377
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 57af728d94bb..1c4df9bf6813 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
134 q->backing_dev_info.state = 0; 134 q->backing_dev_info.state = 0;
135 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 135 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
136 blk_queue_max_sectors(q, SAFE_MAX_SECTORS); 136 blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
137 blk_queue_hardsect_size(q, 512); 137 blk_queue_logical_block_size(q, 512);
138 blk_queue_dma_alignment(q, 511); 138 blk_queue_dma_alignment(q, 511);
139 blk_queue_congestion_threshold(q); 139 blk_queue_congestion_threshold(q);
140 q->nr_batching = BLK_BATCH_REQ; 140 q->nr_batching = BLK_BATCH_REQ;
@@ -179,16 +179,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
179 */ 179 */
180 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 180 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
181 dma = 1; 181 dma = 1;
182 q->bounce_pfn = max_low_pfn; 182 q->limits.bounce_pfn = max_low_pfn;
183#else 183#else
184 if (b_pfn < blk_max_low_pfn) 184 if (b_pfn < blk_max_low_pfn)
185 dma = 1; 185 dma = 1;
186 q->bounce_pfn = b_pfn; 186 q->limits.bounce_pfn = b_pfn;
187#endif 187#endif
188 if (dma) { 188 if (dma) {
189 init_emergency_isa_pool(); 189 init_emergency_isa_pool();
190 q->bounce_gfp = GFP_NOIO | GFP_DMA; 190 q->bounce_gfp = GFP_NOIO | GFP_DMA;
191 q->bounce_pfn = b_pfn; 191 q->limits.bounce_pfn = b_pfn;
192 } 192 }
193} 193}
194EXPORT_SYMBOL(blk_queue_bounce_limit); 194EXPORT_SYMBOL(blk_queue_bounce_limit);
@@ -211,14 +211,23 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
211 } 211 }
212 212
213 if (BLK_DEF_MAX_SECTORS > max_sectors) 213 if (BLK_DEF_MAX_SECTORS > max_sectors)
214 q->max_hw_sectors = q->max_sectors = max_sectors; 214 q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
215 else { 215 else {
216 q->max_sectors = BLK_DEF_MAX_SECTORS; 216 q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
217 q->max_hw_sectors = max_sectors; 217 q->limits.max_hw_sectors = max_sectors;
218 } 218 }
219} 219}
220EXPORT_SYMBOL(blk_queue_max_sectors); 220EXPORT_SYMBOL(blk_queue_max_sectors);
221 221
222void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
223{
224 if (BLK_DEF_MAX_SECTORS > max_sectors)
225 q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
226 else
227 q->limits.max_hw_sectors = max_sectors;
228}
229EXPORT_SYMBOL(blk_queue_max_hw_sectors);
230
222/** 231/**
223 * blk_queue_max_phys_segments - set max phys segments for a request for this queue 232 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
224 * @q: the request queue for the device 233 * @q: the request queue for the device
@@ -238,7 +247,7 @@ void blk_queue_max_phys_segments(struct request_queue *q,
238 __func__, max_segments); 247 __func__, max_segments);
239 } 248 }
240 249
241 q->max_phys_segments = max_segments; 250 q->limits.max_phys_segments = max_segments;
242} 251}
243EXPORT_SYMBOL(blk_queue_max_phys_segments); 252EXPORT_SYMBOL(blk_queue_max_phys_segments);
244 253
@@ -262,7 +271,7 @@ void blk_queue_max_hw_segments(struct request_queue *q,
262 __func__, max_segments); 271 __func__, max_segments);
263 } 272 }
264 273
265 q->max_hw_segments = max_segments; 274 q->limits.max_hw_segments = max_segments;
266} 275}
267EXPORT_SYMBOL(blk_queue_max_hw_segments); 276EXPORT_SYMBOL(blk_queue_max_hw_segments);
268 277
@@ -283,26 +292,110 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
283 __func__, max_size); 292 __func__, max_size);
284 } 293 }
285 294
286 q->max_segment_size = max_size; 295 q->limits.max_segment_size = max_size;
287} 296}
288EXPORT_SYMBOL(blk_queue_max_segment_size); 297EXPORT_SYMBOL(blk_queue_max_segment_size);
289 298
290/** 299/**
291 * blk_queue_hardsect_size - set hardware sector size for the queue 300 * blk_queue_logical_block_size - set logical block size for the queue
292 * @q: the request queue for the device 301 * @q: the request queue for the device
293 * @size: the hardware sector size, in bytes 302 * @size: the logical block size, in bytes
294 * 303 *
295 * Description: 304 * Description:
296 * This should typically be set to the lowest possible sector size 305 * This should be set to the lowest possible block size that the
297 * that the hardware can operate on (possible without reverting to 306 * storage device can address. The default of 512 covers most
298 * even internal read-modify-write operations). Usually the default 307 * hardware.
299 * of 512 covers most hardware.
300 **/ 308 **/
301void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) 309void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
310{
311 q->limits.logical_block_size = size;
312
313 if (q->limits.physical_block_size < size)
314 q->limits.physical_block_size = size;
315
316 if (q->limits.io_min < q->limits.physical_block_size)
317 q->limits.io_min = q->limits.physical_block_size;
318}
319EXPORT_SYMBOL(blk_queue_logical_block_size);
320
321/**
322 * blk_queue_physical_block_size - set physical block size for the queue
323 * @q: the request queue for the device
324 * @size: the physical block size, in bytes
325 *
326 * Description:
327 * This should be set to the lowest possible sector size that the
328 * hardware can operate on without reverting to read-modify-write
329 * operations.
330 */
331void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
332{
333 q->limits.physical_block_size = size;
334
335 if (q->limits.physical_block_size < q->limits.logical_block_size)
336 q->limits.physical_block_size = q->limits.logical_block_size;
337
338 if (q->limits.io_min < q->limits.physical_block_size)
339 q->limits.io_min = q->limits.physical_block_size;
340}
341EXPORT_SYMBOL(blk_queue_physical_block_size);
342
343/**
344 * blk_queue_alignment_offset - set physical block alignment offset
345 * @q: the request queue for the device
346 * @alignment: alignment offset in bytes
347 *
348 * Description:
349 * Some devices are naturally misaligned to compensate for things like
350 * the legacy DOS partition table 63-sector offset. Low-level drivers
351 * should call this function for devices whose first sector is not
352 * naturally aligned.
353 */
354void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
302{ 355{
303 q->hardsect_size = size; 356 q->limits.alignment_offset =
357 offset & (q->limits.physical_block_size - 1);
358 q->limits.misaligned = 0;
304} 359}
305EXPORT_SYMBOL(blk_queue_hardsect_size); 360EXPORT_SYMBOL(blk_queue_alignment_offset);
361
362/**
363 * blk_queue_io_min - set minimum request size for the queue
364 * @q: the request queue for the device
365 * @io_min: smallest I/O size in bytes
366 *
367 * Description:
368 * Some devices have an internal block size bigger than the reported
369 * hardware sector size. This function can be used to signal the
370 * smallest I/O the device can perform without incurring a performance
371 * penalty.
372 */
373void blk_queue_io_min(struct request_queue *q, unsigned int min)
374{
375 q->limits.io_min = min;
376
377 if (q->limits.io_min < q->limits.logical_block_size)
378 q->limits.io_min = q->limits.logical_block_size;
379
380 if (q->limits.io_min < q->limits.physical_block_size)
381 q->limits.io_min = q->limits.physical_block_size;
382}
383EXPORT_SYMBOL(blk_queue_io_min);
384
385/**
386 * blk_queue_io_opt - set optimal request size for the queue
387 * @q: the request queue for the device
388 * @io_opt: optimal request size in bytes
389 *
390 * Description:
391 * Drivers can call this function to set the preferred I/O request
392 * size for devices that report such a value.
393 */
394void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
395{
396 q->limits.io_opt = opt;
397}
398EXPORT_SYMBOL(blk_queue_io_opt);
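For orientation, a sketch (hypothetical values and name) of how a driver's probe path would use the new topology setters for a 512-byte-logical / 4KiB-physical device.

#include <linux/blkdev.h>

static void foo_set_topology(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);	/* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);	/* internal block size       */
	blk_queue_alignment_offset(q, 0);	/* LBA 0 naturally aligned   */
	blk_queue_io_min(q, 4096);		/* avoid read-modify-write   */
	blk_queue_io_opt(q, 128 * 1024);	/* preferred I/O granularity */
}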
306 399
307/* 400/*
308 * Returns the minimum that is _not_ zero, unless both are zero. 401 * Returns the minimum that is _not_ zero, unless both are zero.
@@ -317,14 +410,27 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
317void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 410void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
318{ 411{
319 /* zero is "infinity" */ 412 /* zero is "infinity" */
320 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 413 t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
321 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); 414 queue_max_sectors(b));
322 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); 415
416 t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
417 queue_max_hw_sectors(b));
418
419 t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
420 queue_segment_boundary(b));
421
422 t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
423 queue_max_phys_segments(b));
424
425 t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
426 queue_max_hw_segments(b));
427
428 t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
429 queue_max_segment_size(b));
430
431 t->limits.logical_block_size = max(queue_logical_block_size(t),
432 queue_logical_block_size(b));
323 433
324 t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
325 t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
326 t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
327 t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
328 if (!t->queue_lock) 434 if (!t->queue_lock)
329 WARN_ON_ONCE(1); 435 WARN_ON_ONCE(1);
330 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { 436 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
@@ -337,6 +443,109 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
337EXPORT_SYMBOL(blk_queue_stack_limits); 443EXPORT_SYMBOL(blk_queue_stack_limits);
338 444
339/** 445/**
446 * blk_stack_limits - adjust queue_limits for stacked devices
447 * @t: the stacking driver limits (top)
448 * @b: the underlying queue limits (bottom)
449 * @offset: offset to beginning of data within component device
450 *
451 * Description:
452 * Merges two queue_limits structs. Returns 0 if alignment didn't
453 * change. Returns -1 if adding the bottom device caused
454 * misalignment.
455 */
456int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
457 sector_t offset)
458{
459 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
460 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
461 t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
462
463 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
464 b->seg_boundary_mask);
465
466 t->max_phys_segments = min_not_zero(t->max_phys_segments,
467 b->max_phys_segments);
468
469 t->max_hw_segments = min_not_zero(t->max_hw_segments,
470 b->max_hw_segments);
471
472 t->max_segment_size = min_not_zero(t->max_segment_size,
473 b->max_segment_size);
474
475 t->logical_block_size = max(t->logical_block_size,
476 b->logical_block_size);
477
478 t->physical_block_size = max(t->physical_block_size,
479 b->physical_block_size);
480
481 t->io_min = max(t->io_min, b->io_min);
482 t->no_cluster |= b->no_cluster;
483
484 /* Bottom device offset aligned? */
485 if (offset &&
486 (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
487 t->misaligned = 1;
488 return -1;
489 }
490
491 /* If top has no alignment offset, inherit from bottom */
492 if (!t->alignment_offset)
493 t->alignment_offset =
494 b->alignment_offset & (b->physical_block_size - 1);
495
496 /* Top device aligned on logical block boundary? */
497 if (t->alignment_offset & (t->logical_block_size - 1)) {
498 t->misaligned = 1;
499 return -1;
500 }
501
502 return 0;
503}
504EXPORT_SYMBOL(blk_stack_limits);
505
506/**
507 * disk_stack_limits - adjust queue limits for stacked drivers
508 * @disk: MD/DM gendisk (top)
509 * @bdev: the underlying block device (bottom)
510 * @offset: offset to beginning of data within component device
511 *
512 * Description:
513 * Merges the limits for two queues. Prints a warning if the bottom
514 * device turns out to be misaligned with respect to the top device;
515 * no value is returned.
516 */
517void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
518 sector_t offset)
519{
520 struct request_queue *t = disk->queue;
521 struct request_queue *b = bdev_get_queue(bdev);
522
523 offset += get_start_sect(bdev) << 9;
524
525 if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
526 char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
527
528 disk_name(disk, 0, top);
529 bdevname(bdev, bottom);
530
531 printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
532 top, bottom);
533 }
534
535 if (!t->queue_lock)
536 WARN_ON_ONCE(1);
537 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
538 unsigned long flags;
539
540 spin_lock_irqsave(t->queue_lock, flags);
541 if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
542 queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
543 spin_unlock_irqrestore(t->queue_lock, flags);
544 }
545}
546EXPORT_SYMBOL(disk_stack_limits);
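A brief sketch (hypothetical MD/DM-style driver) of the two stacking entry points added here.

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Fold one component device into the top-level disk's limits. */
static void foo_add_component(struct gendisk *top,
			      struct block_device *bdev,
			      sector_t data_offset)
{
	/* calls blk_stack_limits() and warns if @bdev is misaligned */
	disk_stack_limits(top, bdev, data_offset);
}

/* The lower-level form when only queue_limits structs are available: */
static int foo_merge_limits(struct queue_limits *top,
			    struct queue_limits *bottom, sector_t start)
{
	return blk_stack_limits(top, bottom, start);	/* -1 = misaligned */
}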
547
548/**
340 * blk_queue_dma_pad - set pad mask 549 * blk_queue_dma_pad - set pad mask
341 * @q: the request queue for the device 550 * @q: the request queue for the device
342 * @mask: pad mask 551 * @mask: pad mask
@@ -396,11 +605,11 @@ int blk_queue_dma_drain(struct request_queue *q,
396 dma_drain_needed_fn *dma_drain_needed, 605 dma_drain_needed_fn *dma_drain_needed,
397 void *buf, unsigned int size) 606 void *buf, unsigned int size)
398{ 607{
399 if (q->max_hw_segments < 2 || q->max_phys_segments < 2) 608 if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
400 return -EINVAL; 609 return -EINVAL;
401 /* make room for appending the drain */ 610 /* make room for appending the drain */
402 --q->max_hw_segments; 611 blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
403 --q->max_phys_segments; 612 blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
404 q->dma_drain_needed = dma_drain_needed; 613 q->dma_drain_needed = dma_drain_needed;
405 q->dma_drain_buffer = buf; 614 q->dma_drain_buffer = buf;
406 q->dma_drain_size = size; 615 q->dma_drain_size = size;
@@ -422,7 +631,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
422 __func__, mask); 631 __func__, mask);
423 } 632 }
424 633
425 q->seg_boundary_mask = mask; 634 q->limits.seg_boundary_mask = mask;
426} 635}
427EXPORT_SYMBOL(blk_queue_segment_boundary); 636EXPORT_SYMBOL(blk_queue_segment_boundary);
428 637
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 26f9ec28f56c..b1cd04087d6a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -95,21 +95,36 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
95 95
96static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) 96static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
97{ 97{
98 int max_sectors_kb = q->max_sectors >> 1; 98 int max_sectors_kb = queue_max_sectors(q) >> 1;
99 99
100 return queue_var_show(max_sectors_kb, (page)); 100 return queue_var_show(max_sectors_kb, (page));
101} 101}
102 102
103static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page) 103static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
104{ 104{
105 return queue_var_show(q->hardsect_size, page); 105 return queue_var_show(queue_logical_block_size(q), page);
106}
107
108static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
109{
110 return queue_var_show(queue_physical_block_size(q), page);
111}
112
113static ssize_t queue_io_min_show(struct request_queue *q, char *page)
114{
115 return queue_var_show(queue_io_min(q), page);
116}
117
118static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
119{
120 return queue_var_show(queue_io_opt(q), page);
106} 121}
107 122
108static ssize_t 123static ssize_t
109queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) 124queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
110{ 125{
111 unsigned long max_sectors_kb, 126 unsigned long max_sectors_kb,
112 max_hw_sectors_kb = q->max_hw_sectors >> 1, 127 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
113 page_kb = 1 << (PAGE_CACHE_SHIFT - 10); 128 page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
114 ssize_t ret = queue_var_store(&max_sectors_kb, page, count); 129 ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
115 130
@@ -117,7 +132,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
117 return -EINVAL; 132 return -EINVAL;
118 133
119 spin_lock_irq(q->queue_lock); 134 spin_lock_irq(q->queue_lock);
120 q->max_sectors = max_sectors_kb << 1; 135 blk_queue_max_sectors(q, max_sectors_kb << 1);
121 spin_unlock_irq(q->queue_lock); 136 spin_unlock_irq(q->queue_lock);
122 137
123 return ret; 138 return ret;
@@ -125,7 +140,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
125 140
126static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) 141static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
127{ 142{
128 int max_hw_sectors_kb = q->max_hw_sectors >> 1; 143 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
129 144
130 return queue_var_show(max_hw_sectors_kb, (page)); 145 return queue_var_show(max_hw_sectors_kb, (page));
131} 146}
@@ -249,7 +264,27 @@ static struct queue_sysfs_entry queue_iosched_entry = {
249 264
250static struct queue_sysfs_entry queue_hw_sector_size_entry = { 265static struct queue_sysfs_entry queue_hw_sector_size_entry = {
251 .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, 266 .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
252 .show = queue_hw_sector_size_show, 267 .show = queue_logical_block_size_show,
268};
269
270static struct queue_sysfs_entry queue_logical_block_size_entry = {
271 .attr = {.name = "logical_block_size", .mode = S_IRUGO },
272 .show = queue_logical_block_size_show,
273};
274
275static struct queue_sysfs_entry queue_physical_block_size_entry = {
276 .attr = {.name = "physical_block_size", .mode = S_IRUGO },
277 .show = queue_physical_block_size_show,
278};
279
280static struct queue_sysfs_entry queue_io_min_entry = {
281 .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
282 .show = queue_io_min_show,
283};
284
285static struct queue_sysfs_entry queue_io_opt_entry = {
286 .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
287 .show = queue_io_opt_show,
253}; 288};
254 289
255static struct queue_sysfs_entry queue_nonrot_entry = { 290static struct queue_sysfs_entry queue_nonrot_entry = {
@@ -283,6 +318,10 @@ static struct attribute *default_attrs[] = {
283 &queue_max_sectors_entry.attr, 318 &queue_max_sectors_entry.attr,
284 &queue_iosched_entry.attr, 319 &queue_iosched_entry.attr,
285 &queue_hw_sector_size_entry.attr, 320 &queue_hw_sector_size_entry.attr,
321 &queue_logical_block_size_entry.attr,
322 &queue_physical_block_size_entry.attr,
323 &queue_io_min_entry.attr,
324 &queue_io_opt_entry.attr,
286 &queue_nonrot_entry.attr, 325 &queue_nonrot_entry.attr,
287 &queue_nomerges_entry.attr, 326 &queue_nomerges_entry.attr,
288 &queue_rq_affinity_entry.attr, 327 &queue_rq_affinity_entry.attr,
@@ -394,16 +433,15 @@ int blk_register_queue(struct gendisk *disk)
394 if (ret) 433 if (ret)
395 return ret; 434 return ret;
396 435
397 if (!q->request_fn) 436 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
398 return 0;
399
400 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj),
401 "%s", "queue");
402 if (ret < 0) 437 if (ret < 0)
403 return ret; 438 return ret;
404 439
405 kobject_uevent(&q->kobj, KOBJ_ADD); 440 kobject_uevent(&q->kobj, KOBJ_ADD);
406 441
442 if (!q->request_fn)
443 return 0;
444
407 ret = elv_register_queue(q); 445 ret = elv_register_queue(q);
408 if (ret) { 446 if (ret) {
409 kobject_uevent(&q->kobj, KOBJ_REMOVE); 447 kobject_uevent(&q->kobj, KOBJ_REMOVE);
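Purely illustrative (userspace, not part of the patch): reading the queue attributes exported by the new sysfs entries above, for a hypothetical /dev/sda.

#include <stdio.h>

static long read_queue_attr(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("logical_block_size:  %ld\n", read_queue_attr("logical_block_size"));
	printf("physical_block_size: %ld\n", read_queue_attr("physical_block_size"));
	printf("minimum_io_size:     %ld\n", read_queue_attr("minimum_io_size"));
	printf("optimal_io_size:     %ld\n", read_queue_attr("optimal_io_size"));
	return 0;
}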
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3c518e3303ae..2e5cfeb59333 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
336int blk_queue_start_tag(struct request_queue *q, struct request *rq) 336int blk_queue_start_tag(struct request_queue *q, struct request *rq)
337{ 337{
338 struct blk_queue_tag *bqt = q->queue_tags; 338 struct blk_queue_tag *bqt = q->queue_tags;
339 unsigned max_depth, offset; 339 unsigned max_depth;
340 int tag; 340 int tag;
341 341
342 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { 342 if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
355 * to starve sync IO on behalf of flooding async IO. 355 * to starve sync IO on behalf of flooding async IO.
356 */ 356 */
357 max_depth = bqt->max_depth; 357 max_depth = bqt->max_depth;
358 if (rq_is_sync(rq)) 358 if (!rq_is_sync(rq) && max_depth > 1) {
359 offset = 0; 359 max_depth -= 2;
360 else 360 if (!max_depth)
361 offset = max_depth >> 2; 361 max_depth = 1;
362 if (q->in_flight[0] > max_depth)
363 return 1;
364 }
362 365
363 do { 366 do {
364 tag = find_next_zero_bit(bqt->tag_map, max_depth, offset); 367 tag = find_first_zero_bit(bqt->tag_map, max_depth);
365 if (tag >= max_depth) 368 if (tag >= max_depth)
366 return 1; 369 return 1;
367 370
@@ -374,7 +377,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
374 rq->cmd_flags |= REQ_QUEUED; 377 rq->cmd_flags |= REQ_QUEUED;
375 rq->tag = tag; 378 rq->tag = tag;
376 bqt->tag_index[tag] = rq; 379 bqt->tag_index[tag] = rq;
377 blkdev_dequeue_request(rq); 380 blk_start_request(rq);
378 list_add(&rq->queuelist, &q->tag_busy_list); 381 list_add(&rq->queuelist, &q->tag_busy_list);
379 return 0; 382 return 0;
380} 383}
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ec0d503cacd..1ba7e0aca878 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -122,10 +122,8 @@ void blk_rq_timed_out_timer(unsigned long data)
122 if (blk_mark_rq_complete(rq)) 122 if (blk_mark_rq_complete(rq))
123 continue; 123 continue;
124 blk_rq_timed_out(rq); 124 blk_rq_timed_out(rq);
125 } else { 125 } else if (!next || time_after(next, rq->deadline))
126 if (!next || time_after(next, rq->deadline)) 126 next = rq->deadline;
127 next = rq->deadline;
128 }
129 } 127 }
130 128
131 /* 129 /*
@@ -176,16 +174,14 @@ void blk_add_timer(struct request *req)
176 BUG_ON(!list_empty(&req->timeout_list)); 174 BUG_ON(!list_empty(&req->timeout_list));
177 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); 175 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
178 176
179 if (req->timeout) 177 /*
180 req->deadline = jiffies + req->timeout; 178 * Some LLDs, like scsi, peek at the timeout to prevent a
181 else { 179 * command from being retried forever.
182 req->deadline = jiffies + q->rq_timeout; 180 */
183 /* 181 if (!req->timeout)
184 * Some LLDs, like scsi, peek at the timeout to prevent
185 * a command from being retried forever.
186 */
187 req->timeout = q->rq_timeout; 182 req->timeout = q->rq_timeout;
188 } 183
184 req->deadline = jiffies + req->timeout;
189 list_add_tail(&req->timeout_list, &q->timeout_list); 185 list_add_tail(&req->timeout_list, &q->timeout_list);
190 186
191 /* 187 /*
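The blk_add_timer() change folds the two deadline computations into one: a request with no explicit timeout first inherits the queue default, and the deadline is then derived from req->timeout in a single place, which also keeps the value visible to LLDs such as SCSI. A small sketch of the resulting logic, using plain integers in place of jiffies:

#include <stdio.h>

struct fake_req {
	unsigned long timeout;   /* 0 means "use the queue default" */
	unsigned long deadline;
};

/* Mirrors the simplified blk_add_timer() flow: default the per-request
 * timeout from the queue, then compute the deadline once from it. */
static void add_timer_sketch(struct fake_req *req, unsigned long now,
			     unsigned long queue_timeout)
{
	if (!req->timeout)
		req->timeout = queue_timeout;
	req->deadline = now + req->timeout;
}

int main(void)
{
	struct fake_req a = { .timeout = 0 };	/* falls back to queue default */
	struct fake_req b = { .timeout = 5 };	/* keeps its own timeout */

	add_timer_sketch(&a, 100, 30);
	add_timer_sketch(&b, 100, 30);
	printf("a: timeout=%lu deadline=%lu\n", a.timeout, a.deadline);
	printf("b: timeout=%lu deadline=%lu\n", b.timeout, b.deadline);
	return 0;
}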
diff --git a/block/blk.h b/block/blk.h
index 79c85f7c9ff5..3fae6add5430 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -13,6 +13,9 @@ extern struct kobj_type blk_queue_ktype;
13void init_request_from_bio(struct request *req, struct bio *bio); 13void init_request_from_bio(struct request *req, struct bio *bio);
14void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 14void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
15 struct bio *bio); 15 struct bio *bio);
16int blk_rq_append_bio(struct request_queue *q, struct request *rq,
17 struct bio *bio);
18void blk_dequeue_request(struct request *rq);
16void __blk_queue_free_tags(struct request_queue *q); 19void __blk_queue_free_tags(struct request_queue *q);
17 20
18void blk_unplug_work(struct work_struct *work); 21void blk_unplug_work(struct work_struct *work);
@@ -43,6 +46,43 @@ static inline void blk_clear_rq_complete(struct request *rq)
43 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); 46 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
44} 47}
45 48
49/*
50 * Internal elevator interface
51 */
52#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
53
54static inline struct request *__elv_next_request(struct request_queue *q)
55{
56 struct request *rq;
57
58 while (1) {
59 while (!list_empty(&q->queue_head)) {
60 rq = list_entry_rq(q->queue_head.next);
61 if (blk_do_ordered(q, &rq))
62 return rq;
63 }
64
65 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
66 return NULL;
67 }
68}
69
70static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
71{
72 struct elevator_queue *e = q->elevator;
73
74 if (e->ops->elevator_activate_req_fn)
75 e->ops->elevator_activate_req_fn(q, rq);
76}
77
78static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
79{
80 struct elevator_queue *e = q->elevator;
81
82 if (e->ops->elevator_deactivate_req_fn)
83 e->ops->elevator_deactivate_req_fn(q, rq);
84}
85
46#ifdef CONFIG_FAIL_IO_TIMEOUT 86#ifdef CONFIG_FAIL_IO_TIMEOUT
47int blk_should_fake_timeout(struct request_queue *); 87int blk_should_fake_timeout(struct request_queue *);
48ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); 88ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
@@ -64,7 +104,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
64int attempt_back_merge(struct request_queue *q, struct request *rq); 104int attempt_back_merge(struct request_queue *q, struct request *rq);
65int attempt_front_merge(struct request_queue *q, struct request *rq); 105int attempt_front_merge(struct request_queue *q, struct request *rq);
66void blk_recalc_rq_segments(struct request *rq); 106void blk_recalc_rq_segments(struct request *rq);
67void blk_recalc_rq_sectors(struct request *rq, int nsect);
68 107
69void blk_queue_congestion_threshold(struct request_queue *q); 108void blk_queue_congestion_threshold(struct request_queue *q);
70 109
@@ -112,9 +151,17 @@ static inline int blk_cpu_to_group(int cpu)
112#endif 151#endif
113} 152}
114 153
154/*
155 * Contribute to IO statistics IFF:
156 *
157 * a) it's attached to a gendisk, and
158 * b) the queue had IO stats enabled when this request was started, and
159 * c) it's a file system request or a discard request
160 */
115static inline int blk_do_io_stat(struct request *rq) 161static inline int blk_do_io_stat(struct request *rq)
116{ 162{
117 return rq->rq_disk && blk_rq_io_stat(rq); 163 return rq->rq_disk && blk_rq_io_stat(rq) &&
164 (blk_fs_request(rq) || blk_discard_rq(rq));
118} 165}
119 166
120#endif 167#endif
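blk.h now keeps __elv_next_request() and the elv_activate/deactivate helpers private to the block core, and tightens blk_do_io_stat() so accounting only happens for file system or discard requests on a gendisk whose queue had stats enabled. A standalone sketch of that predicate, with the request flags modeled as plain booleans:

#include <stdbool.h>
#include <stdio.h>

struct fake_rq {
	bool has_disk;    /* attached to a gendisk */
	bool io_stat;     /* queue had stats enabled when the rq started */
	bool fs_request;  /* regular file system request */
	bool discard;     /* discard request */
};

/* Model of the tightened blk_do_io_stat(): account only when all three
 * conditions from the comment in blk.h hold. */
static bool do_io_stat(const struct fake_rq *rq)
{
	return rq->has_disk && rq->io_stat && (rq->fs_request || rq->discard);
}

int main(void)
{
	struct fake_rq pc = { .has_disk = true, .io_stat = true };          /* e.g. a BLOCK_PC request */
	struct fake_rq fs = { .has_disk = true, .io_stat = true, .fs_request = true };

	printf("pc request accounted: %d\n", do_io_stat(&pc));   /* 0 */
	printf("fs request accounted: %d\n", do_io_stat(&fs));   /* 1 */
	return 0;
}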
diff --git a/block/bsg.c b/block/bsg.c
index dd81be455e00..5358f9ae13c1 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -446,15 +446,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
446 } 446 }
447 447
448 if (rq->next_rq) { 448 if (rq->next_rq) {
449 hdr->dout_resid = rq->data_len; 449 hdr->dout_resid = rq->resid_len;
450 hdr->din_resid = rq->next_rq->data_len; 450 hdr->din_resid = rq->next_rq->resid_len;
451 blk_rq_unmap_user(bidi_bio); 451 blk_rq_unmap_user(bidi_bio);
452 rq->next_rq->bio = NULL; 452 rq->next_rq->bio = NULL;
453 blk_put_request(rq->next_rq); 453 blk_put_request(rq->next_rq);
454 } else if (rq_data_dir(rq) == READ) 454 } else if (rq_data_dir(rq) == READ)
455 hdr->din_resid = rq->data_len; 455 hdr->din_resid = rq->resid_len;
456 else 456 else
457 hdr->dout_resid = rq->data_len; 457 hdr->dout_resid = rq->resid_len;
458 458
459 /* 459 /*
460 * If the request generated a negative error number, return it 460 * If the request generated a negative error number, return it
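bsg now reports residuals from the new rq->resid_len field instead of overloading data_len; for bidirectional commands each direction gets its own count. A hedged sketch of how the two header fields are filled, with simplified request and header types standing in for the real ones:

#include <stdio.h>

struct fake_rq { unsigned resid_len; struct fake_rq *next_rq; };
struct fake_hdr { unsigned din_resid, dout_resid; };

/* Model of blk_complete_sgv4_hdr_rq() residual reporting: bidi requests
 * fill both counts, otherwise the count goes to the transfer direction. */
static void fill_resid(struct fake_hdr *hdr, struct fake_rq *rq, int is_read)
{
	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
	} else if (is_read) {
		hdr->din_resid = rq->resid_len;
	} else {
		hdr->dout_resid = rq->resid_len;
	}
}

int main(void)
{
	struct fake_rq in = { .resid_len = 16 };
	struct fake_rq out = { .resid_len = 4, .next_rq = &in };
	struct fake_hdr bidi = { 0 }, uni = { 0 };

	fill_resid(&bidi, &out, 0);	/* bidirectional command */
	fill_resid(&uni, &in, 1);	/* plain read */
	printf("bidi: dout=%u din=%u\n", bidi.dout_resid, bidi.din_resid);
	printf("read: din=%u\n", uni.din_resid);
	return 0;
}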
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a55a9bd75bd1..ef2f72d42434 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
349 else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 349 else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
350 return rq2; 350 return rq2;
351 351
352 s1 = rq1->sector; 352 s1 = blk_rq_pos(rq1);
353 s2 = rq2->sector; 353 s2 = blk_rq_pos(rq2);
354 354
355 last = cfqd->last_position; 355 last = cfqd->last_position;
356 356
@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
579 * Sort strictly based on sector. Smallest to the left, 579 * Sort strictly based on sector. Smallest to the left,
580 * largest to the right. 580 * largest to the right.
581 */ 581 */
582 if (sector > cfqq->next_rq->sector) 582 if (sector > blk_rq_pos(cfqq->next_rq))
583 n = &(*p)->rb_right; 583 n = &(*p)->rb_right;
584 else if (sector < cfqq->next_rq->sector) 584 else if (sector < blk_rq_pos(cfqq->next_rq))
585 n = &(*p)->rb_left; 585 n = &(*p)->rb_left;
586 else 586 else
587 break; 587 break;
@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
611 return; 611 return;
612 612
613 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; 613 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
614 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector, 614 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
615 &parent, &p); 615 blk_rq_pos(cfqq->next_rq), &parent, &p);
616 if (!__cfqq) { 616 if (!__cfqq) {
617 rb_link_node(&cfqq->p_node, parent, p); 617 rb_link_node(&cfqq->p_node, parent, p);
618 rb_insert_color(&cfqq->p_node, cfqq->p_root); 618 rb_insert_color(&cfqq->p_node, cfqq->p_root);
@@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
760 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 760 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
761 cfqd->rq_in_driver); 761 cfqd->rq_in_driver);
762 762
763 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; 763 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
764} 764}
765 765
766static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 766static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
@@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
949static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, 949static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
950 struct request *rq) 950 struct request *rq)
951{ 951{
952 if (rq->sector >= cfqd->last_position) 952 if (blk_rq_pos(rq) >= cfqd->last_position)
953 return rq->sector - cfqd->last_position; 953 return blk_rq_pos(rq) - cfqd->last_position;
954 else 954 else
955 return cfqd->last_position - rq->sector; 955 return cfqd->last_position - blk_rq_pos(rq);
956} 956}
957 957
958#define CIC_SEEK_THR 8 * 1024 958#define CIC_SEEK_THR 8 * 1024
@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
996 if (cfq_rq_close(cfqd, __cfqq->next_rq)) 996 if (cfq_rq_close(cfqd, __cfqq->next_rq))
997 return __cfqq; 997 return __cfqq;
998 998
999 if (__cfqq->next_rq->sector < sector) 999 if (blk_rq_pos(__cfqq->next_rq) < sector)
1000 node = rb_next(&__cfqq->p_node); 1000 node = rb_next(&__cfqq->p_node);
1001 else 1001 else
1002 node = rb_prev(&__cfqq->p_node); 1002 node = rb_prev(&__cfqq->p_node);
@@ -1282,7 +1282,7 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1282 if (!cfqd->active_cic) { 1282 if (!cfqd->active_cic) {
1283 struct cfq_io_context *cic = RQ_CIC(rq); 1283 struct cfq_io_context *cic = RQ_CIC(rq);
1284 1284
1285 atomic_inc(&cic->ioc->refcount); 1285 atomic_long_inc(&cic->ioc->refcount);
1286 cfqd->active_cic = cic; 1286 cfqd->active_cic = cic;
1287 } 1287 }
1288} 1288}
@@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1918 1918
1919 if (!cic->last_request_pos) 1919 if (!cic->last_request_pos)
1920 sdist = 0; 1920 sdist = 0;
1921 else if (cic->last_request_pos < rq->sector) 1921 else if (cic->last_request_pos < blk_rq_pos(rq))
1922 sdist = rq->sector - cic->last_request_pos; 1922 sdist = blk_rq_pos(rq) - cic->last_request_pos;
1923 else 1923 else
1924 sdist = cic->last_request_pos - rq->sector; 1924 sdist = cic->last_request_pos - blk_rq_pos(rq);
1925 1925
1926 /* 1926 /*
1927 * Don't allow the seek distance to get too large from the 1927 * Don't allow the seek distance to get too large from the
@@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2071 cfq_update_io_seektime(cfqd, cic, rq); 2071 cfq_update_io_seektime(cfqd, cic, rq);
2072 cfq_update_idle_window(cfqd, cfqq, cic); 2072 cfq_update_idle_window(cfqd, cfqq, cic);
2073 2073
2074 cic->last_request_pos = rq->sector + rq->nr_sectors; 2074 cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
2075 2075
2076 if (cfqq == cfqd->active_queue) { 2076 if (cfqq == cfqd->active_queue) {
2077 /* 2077 /*
@@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2088 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 2088 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2089 cfqd->busy_queues > 1) { 2089 cfqd->busy_queues > 1) {
2090 del_timer(&cfqd->idle_slice_timer); 2090 del_timer(&cfqd->idle_slice_timer);
2091 blk_start_queueing(cfqd->queue); 2091 __blk_run_queue(cfqd->queue);
2092 } 2092 }
2093 cfq_mark_cfqq_must_dispatch(cfqq); 2093 cfq_mark_cfqq_must_dispatch(cfqq);
2094 } 2094 }
@@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2100 * this new queue is RT and the current one is BE 2100 * this new queue is RT and the current one is BE
2101 */ 2101 */
2102 cfq_preempt_queue(cfqd, cfqq); 2102 cfq_preempt_queue(cfqd, cfqq);
2103 blk_start_queueing(cfqd->queue); 2103 __blk_run_queue(cfqd->queue);
2104 } 2104 }
2105} 2105}
2106 2106
@@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
2345 struct request_queue *q = cfqd->queue; 2345 struct request_queue *q = cfqd->queue;
2346 2346
2347 spin_lock_irq(q->queue_lock); 2347 spin_lock_irq(q->queue_lock);
2348 blk_start_queueing(q); 2348 __blk_run_queue(cfqd->queue);
2349 spin_unlock_irq(q->queue_lock); 2349 spin_unlock_irq(q->queue_lock);
2350} 2350}
2351 2351
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f8c218cd08e1..7865a34e0faa 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -763,10 +763,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
763 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ 763 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
764 return compat_put_int(arg, block_size(bdev)); 764 return compat_put_int(arg, block_size(bdev));
765 case BLKSSZGET: /* get block device hardware sector size */ 765 case BLKSSZGET: /* get block device hardware sector size */
766 return compat_put_int(arg, bdev_hardsect_size(bdev)); 766 return compat_put_int(arg, bdev_logical_block_size(bdev));
767 case BLKSECTGET: 767 case BLKSECTGET:
768 return compat_put_ushort(arg, 768 return compat_put_ushort(arg,
769 bdev_get_queue(bdev)->max_sectors); 769 queue_max_sectors(bdev_get_queue(bdev)));
770 case BLKRASET: /* compatible, but no compat_ptr (!) */ 770 case BLKRASET: /* compatible, but no compat_ptr (!) */
771 case BLKFRASET: 771 case BLKFRASET:
772 if (!capable(CAP_SYS_ADMIN)) 772 if (!capable(CAP_SYS_ADMIN))
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c4d991d4adef..b547cbca7b23 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
138 138
139 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); 139 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
140 if (__rq) { 140 if (__rq) {
141 BUG_ON(sector != __rq->sector); 141 BUG_ON(sector != blk_rq_pos(__rq));
142 142
143 if (elv_rq_merge_ok(__rq, bio)) { 143 if (elv_rq_merge_ok(__rq, bio)) {
144 ret = ELEVATOR_FRONT_MERGE; 144 ret = ELEVATOR_FRONT_MERGE;
diff --git a/block/elevator.c b/block/elevator.c
index e220f0c543e3..ca861927ba41 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,8 +51,7 @@ static const int elv_hash_shift = 6;
51#define ELV_HASH_FN(sec) \ 51#define ELV_HASH_FN(sec) \
52 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) 52 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
53#define ELV_HASH_ENTRIES (1 << elv_hash_shift) 53#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
54#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) 54#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
55#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
56 55
57/* 56/*
58 * Query io scheduler to see if the current process issuing bio may be 57 * Query io scheduler to see if the current process issuing bio may be
@@ -116,9 +115,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
116 * we can merge and sequence is ok, check if it's possible 115 * we can merge and sequence is ok, check if it's possible
117 */ 116 */
118 if (elv_rq_merge_ok(__rq, bio)) { 117 if (elv_rq_merge_ok(__rq, bio)) {
119 if (__rq->sector + __rq->nr_sectors == bio->bi_sector) 118 if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
120 ret = ELEVATOR_BACK_MERGE; 119 ret = ELEVATOR_BACK_MERGE;
121 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector) 120 else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
122 ret = ELEVATOR_FRONT_MERGE; 121 ret = ELEVATOR_FRONT_MERGE;
123 } 122 }
124 123
@@ -306,22 +305,6 @@ void elevator_exit(struct elevator_queue *e)
306} 305}
307EXPORT_SYMBOL(elevator_exit); 306EXPORT_SYMBOL(elevator_exit);
308 307
309static void elv_activate_rq(struct request_queue *q, struct request *rq)
310{
311 struct elevator_queue *e = q->elevator;
312
313 if (e->ops->elevator_activate_req_fn)
314 e->ops->elevator_activate_req_fn(q, rq);
315}
316
317static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
318{
319 struct elevator_queue *e = q->elevator;
320
321 if (e->ops->elevator_deactivate_req_fn)
322 e->ops->elevator_deactivate_req_fn(q, rq);
323}
324
325static inline void __elv_rqhash_del(struct request *rq) 308static inline void __elv_rqhash_del(struct request *rq)
326{ 309{
327 hlist_del_init(&rq->hash); 310 hlist_del_init(&rq->hash);
@@ -383,9 +366,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
383 parent = *p; 366 parent = *p;
384 __rq = rb_entry(parent, struct request, rb_node); 367 __rq = rb_entry(parent, struct request, rb_node);
385 368
386 if (rq->sector < __rq->sector) 369 if (blk_rq_pos(rq) < blk_rq_pos(__rq))
387 p = &(*p)->rb_left; 370 p = &(*p)->rb_left;
388 else if (rq->sector > __rq->sector) 371 else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
389 p = &(*p)->rb_right; 372 p = &(*p)->rb_right;
390 else 373 else
391 return __rq; 374 return __rq;
@@ -413,9 +396,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
413 while (n) { 396 while (n) {
414 rq = rb_entry(n, struct request, rb_node); 397 rq = rb_entry(n, struct request, rb_node);
415 398
416 if (sector < rq->sector) 399 if (sector < blk_rq_pos(rq))
417 n = n->rb_left; 400 n = n->rb_left;
418 else if (sector > rq->sector) 401 else if (sector > blk_rq_pos(rq))
419 n = n->rb_right; 402 n = n->rb_right;
420 else 403 else
421 return rq; 404 return rq;
@@ -454,14 +437,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
454 break; 437 break;
455 if (pos->cmd_flags & stop_flags) 438 if (pos->cmd_flags & stop_flags)
456 break; 439 break;
457 if (rq->sector >= boundary) { 440 if (blk_rq_pos(rq) >= boundary) {
458 if (pos->sector < boundary) 441 if (blk_rq_pos(pos) < boundary)
459 continue; 442 continue;
460 } else { 443 } else {
461 if (pos->sector >= boundary) 444 if (blk_rq_pos(pos) >= boundary)
462 break; 445 break;
463 } 446 }
464 if (rq->sector >= pos->sector) 447 if (blk_rq_pos(rq) >= blk_rq_pos(pos))
465 break; 448 break;
466 } 449 }
467 450
@@ -559,7 +542,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
559 * in_flight count again 542 * in_flight count again
560 */ 543 */
561 if (blk_account_rq(rq)) { 544 if (blk_account_rq(rq)) {
562 q->in_flight--; 545 q->in_flight[rq_is_sync(rq)]--;
563 if (blk_sorted_rq(rq)) 546 if (blk_sorted_rq(rq))
564 elv_deactivate_rq(q, rq); 547 elv_deactivate_rq(q, rq);
565 } 548 }
@@ -588,6 +571,9 @@ void elv_drain_elevator(struct request_queue *q)
588 */ 571 */
589void elv_quiesce_start(struct request_queue *q) 572void elv_quiesce_start(struct request_queue *q)
590{ 573{
574 if (!q->elevator)
575 return;
576
591 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); 577 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
592 578
593 /* 579 /*
@@ -595,7 +581,7 @@ void elv_quiesce_start(struct request_queue *q)
595 */ 581 */
596 elv_drain_elevator(q); 582 elv_drain_elevator(q);
597 while (q->rq.elvpriv) { 583 while (q->rq.elvpriv) {
598 blk_start_queueing(q); 584 __blk_run_queue(q);
599 spin_unlock_irq(q->queue_lock); 585 spin_unlock_irq(q->queue_lock);
600 msleep(10); 586 msleep(10);
601 spin_lock_irq(q->queue_lock); 587 spin_lock_irq(q->queue_lock);
@@ -639,8 +625,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
639 * with anything. There's no point in delaying queue 625 * with anything. There's no point in delaying queue
640 * processing. 626 * processing.
641 */ 627 */
642 blk_remove_plug(q); 628 __blk_run_queue(q);
643 blk_start_queueing(q);
644 break; 629 break;
645 630
646 case ELEVATOR_INSERT_SORT: 631 case ELEVATOR_INSERT_SORT:
@@ -699,7 +684,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
699 684
700 if (unplug_it && blk_queue_plugged(q)) { 685 if (unplug_it && blk_queue_plugged(q)) {
701 int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] 686 int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
702 - q->in_flight; 687 - queue_in_flight(q);
703 688
704 if (nrq >= q->unplug_thresh) 689 if (nrq >= q->unplug_thresh)
705 __generic_unplug_device(q); 690 __generic_unplug_device(q);
@@ -755,117 +740,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
755} 740}
756EXPORT_SYMBOL(elv_add_request); 741EXPORT_SYMBOL(elv_add_request);
757 742
758static inline struct request *__elv_next_request(struct request_queue *q)
759{
760 struct request *rq;
761
762 while (1) {
763 while (!list_empty(&q->queue_head)) {
764 rq = list_entry_rq(q->queue_head.next);
765 if (blk_do_ordered(q, &rq))
766 return rq;
767 }
768
769 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
770 return NULL;
771 }
772}
773
774struct request *elv_next_request(struct request_queue *q)
775{
776 struct request *rq;
777 int ret;
778
779 while ((rq = __elv_next_request(q)) != NULL) {
780 if (!(rq->cmd_flags & REQ_STARTED)) {
781 /*
782 * This is the first time the device driver
783 * sees this request (possibly after
784 * requeueing). Notify IO scheduler.
785 */
786 if (blk_sorted_rq(rq))
787 elv_activate_rq(q, rq);
788
789 /*
790 * just mark as started even if we don't start
791 * it, a request that has been delayed should
792 * not be passed by new incoming requests
793 */
794 rq->cmd_flags |= REQ_STARTED;
795 trace_block_rq_issue(q, rq);
796 }
797
798 if (!q->boundary_rq || q->boundary_rq == rq) {
799 q->end_sector = rq_end_sector(rq);
800 q->boundary_rq = NULL;
801 }
802
803 if (rq->cmd_flags & REQ_DONTPREP)
804 break;
805
806 if (q->dma_drain_size && rq->data_len) {
807 /*
808 * make sure space for the drain appears we
809 * know we can do this because max_hw_segments
810 * has been adjusted to be one fewer than the
811 * device can handle
812 */
813 rq->nr_phys_segments++;
814 }
815
816 if (!q->prep_rq_fn)
817 break;
818
819 ret = q->prep_rq_fn(q, rq);
820 if (ret == BLKPREP_OK) {
821 break;
822 } else if (ret == BLKPREP_DEFER) {
823 /*
824 * the request may have been (partially) prepped.
825 * we need to keep this request in the front to
826 * avoid resource deadlock. REQ_STARTED will
827 * prevent other fs requests from passing this one.
828 */
829 if (q->dma_drain_size && rq->data_len &&
830 !(rq->cmd_flags & REQ_DONTPREP)) {
831 /*
832 * remove the space for the drain we added
833 * so that we don't add it again
834 */
835 --rq->nr_phys_segments;
836 }
837
838 rq = NULL;
839 break;
840 } else if (ret == BLKPREP_KILL) {
841 rq->cmd_flags |= REQ_QUIET;
842 __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
843 } else {
844 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
845 break;
846 }
847 }
848
849 return rq;
850}
851EXPORT_SYMBOL(elv_next_request);
852
853void elv_dequeue_request(struct request_queue *q, struct request *rq)
854{
855 BUG_ON(list_empty(&rq->queuelist));
856 BUG_ON(ELV_ON_HASH(rq));
857
858 list_del_init(&rq->queuelist);
859
860 /*
861 * the time frame between a request being removed from the lists
862 * and to it is freed is accounted as io that is in progress at
863 * the driver side.
864 */
865 if (blk_account_rq(rq))
866 q->in_flight++;
867}
868
869int elv_queue_empty(struct request_queue *q) 743int elv_queue_empty(struct request_queue *q)
870{ 744{
871 struct elevator_queue *e = q->elevator; 745 struct elevator_queue *e = q->elevator;
@@ -935,7 +809,12 @@ void elv_abort_queue(struct request_queue *q)
935 rq = list_entry_rq(q->queue_head.next); 809 rq = list_entry_rq(q->queue_head.next);
936 rq->cmd_flags |= REQ_QUIET; 810 rq->cmd_flags |= REQ_QUIET;
937 trace_block_rq_abort(q, rq); 811 trace_block_rq_abort(q, rq);
938 __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); 812 /*
813 * Mark this request as started so we don't trigger
814 * any debug logic in the end I/O path.
815 */
816 blk_start_request(rq);
817 __blk_end_request_all(rq, -EIO);
939 } 818 }
940} 819}
941EXPORT_SYMBOL(elv_abort_queue); 820EXPORT_SYMBOL(elv_abort_queue);
@@ -948,7 +827,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
948 * request is released from the driver, io must be done 827 * request is released from the driver, io must be done
949 */ 828 */
950 if (blk_account_rq(rq)) { 829 if (blk_account_rq(rq)) {
951 q->in_flight--; 830 q->in_flight[rq_is_sync(rq)]--;
952 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) 831 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
953 e->ops->elevator_completed_req_fn(q, rq); 832 e->ops->elevator_completed_req_fn(q, rq);
954 } 833 }
@@ -963,11 +842,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
963 if (!list_empty(&q->queue_head)) 842 if (!list_empty(&q->queue_head))
964 next = list_entry_rq(q->queue_head.next); 843 next = list_entry_rq(q->queue_head.next);
965 844
966 if (!q->in_flight && 845 if (!queue_in_flight(q) &&
967 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && 846 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
968 (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) { 847 (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
969 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); 848 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
970 blk_start_queueing(q); 849 __blk_run_queue(q);
971 } 850 }
972 } 851 }
973} 852}
@@ -1175,6 +1054,9 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1175 char elevator_name[ELV_NAME_MAX]; 1054 char elevator_name[ELV_NAME_MAX];
1176 struct elevator_type *e; 1055 struct elevator_type *e;
1177 1056
1057 if (!q->elevator)
1058 return count;
1059
1178 strlcpy(elevator_name, name, sizeof(elevator_name)); 1060 strlcpy(elevator_name, name, sizeof(elevator_name));
1179 strstrip(elevator_name); 1061 strstrip(elevator_name);
1180 1062
@@ -1198,10 +1080,15 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1198ssize_t elv_iosched_show(struct request_queue *q, char *name) 1080ssize_t elv_iosched_show(struct request_queue *q, char *name)
1199{ 1081{
1200 struct elevator_queue *e = q->elevator; 1082 struct elevator_queue *e = q->elevator;
1201 struct elevator_type *elv = e->elevator_type; 1083 struct elevator_type *elv;
1202 struct elevator_type *__e; 1084 struct elevator_type *__e;
1203 int len = 0; 1085 int len = 0;
1204 1086
1087 if (!q->elevator)
1088 return sprintf(name, "none\n");
1089
1090 elv = e->elevator_type;
1091
1205 spin_lock(&elv_list_lock); 1092 spin_lock(&elv_list_lock);
1206 list_for_each_entry(__e, &elv_list, list) { 1093 list_for_each_entry(__e, &elv_list, list) {
1207 if (!strcmp(elv->elevator_name, __e->elevator_name)) 1094 if (!strcmp(elv->elevator_name, __e->elevator_name))
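elevator.c stops maintaining a single q->in_flight counter: requests are now accounted per direction in q->in_flight[rq_is_sync(rq)], and the places that want the total (the unplug threshold, the barrier drain test) use queue_in_flight(). A small model of that accounting, with the sync/async split as an array index:

#include <stdio.h>

enum { ASYNC = 0, SYNC = 1 };

struct fake_queue { unsigned in_flight[2]; };

/* queue_in_flight() style helper: total of both directions. */
static unsigned total_in_flight(const struct fake_queue *q)
{
	return q->in_flight[ASYNC] + q->in_flight[SYNC];
}

int main(void)
{
	struct fake_queue q = { { 0, 0 } };

	q.in_flight[SYNC]++;	/* sync request dispatched to the driver */
	q.in_flight[ASYNC]++;	/* async request dispatched */
	q.in_flight[SYNC]--;	/* sync request completed */

	printf("async=%u sync=%u total=%u\n",
	       q.in_flight[ASYNC], q.in_flight[SYNC], total_in_flight(&q));
	return 0;
}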
diff --git a/block/genhd.c b/block/genhd.c
index 1a4916e01732..fe7ccc0a618f 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -852,11 +852,21 @@ static ssize_t disk_capability_show(struct device *dev,
852 return sprintf(buf, "%x\n", disk->flags); 852 return sprintf(buf, "%x\n", disk->flags);
853} 853}
854 854
855static ssize_t disk_alignment_offset_show(struct device *dev,
856 struct device_attribute *attr,
857 char *buf)
858{
859 struct gendisk *disk = dev_to_disk(dev);
860
861 return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
862}
863
855static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); 864static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
856static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); 865static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
857static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); 866static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
858static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); 867static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
859static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); 868static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
869static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
860static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); 870static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
861static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 871static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
862#ifdef CONFIG_FAIL_MAKE_REQUEST 872#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -875,6 +885,7 @@ static struct attribute *disk_attrs[] = {
875 &dev_attr_removable.attr, 885 &dev_attr_removable.attr,
876 &dev_attr_ro.attr, 886 &dev_attr_ro.attr,
877 &dev_attr_size.attr, 887 &dev_attr_size.attr,
888 &dev_attr_alignment_offset.attr,
878 &dev_attr_capability.attr, 889 &dev_attr_capability.attr,
879 &dev_attr_stat.attr, 890 &dev_attr_stat.attr,
880#ifdef CONFIG_FAIL_MAKE_REQUEST 891#ifdef CONFIG_FAIL_MAKE_REQUEST
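The new read-only alignment_offset attribute exposes queue_alignment_offset() under /sys/block/<disk>/. A short userspace C snippet that reads it; the device name and the presence of the file on a given system are assumptions, not guaranteed by this patch alone:

#include <stdio.h>

int main(void)
{
	/* Hypothetical device name; substitute a disk present on the system. */
	const char *path = "/sys/block/sda/alignment_offset";
	FILE *f = fopen(path, "r");
	int offset;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &offset) == 1)
		printf("alignment offset: %d bytes\n", offset);
	fclose(f);
	return 0;
}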
diff --git a/block/ioctl.c b/block/ioctl.c
index ad474d4bbcce..500e4c73cc52 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
152 bio->bi_private = &wait; 152 bio->bi_private = &wait;
153 bio->bi_sector = start; 153 bio->bi_sector = start;
154 154
155 if (len > q->max_hw_sectors) { 155 if (len > queue_max_hw_sectors(q)) {
156 bio->bi_size = q->max_hw_sectors << 9; 156 bio->bi_size = queue_max_hw_sectors(q) << 9;
157 len -= q->max_hw_sectors; 157 len -= queue_max_hw_sectors(q);
158 start += q->max_hw_sectors; 158 start += queue_max_hw_sectors(q);
159 } else { 159 } else {
160 bio->bi_size = len << 9; 160 bio->bi_size = len << 9;
161 len = 0; 161 len = 0;
@@ -311,9 +311,9 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
311 case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ 311 case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
312 return put_int(arg, block_size(bdev)); 312 return put_int(arg, block_size(bdev));
313 case BLKSSZGET: /* get block device hardware sector size */ 313 case BLKSSZGET: /* get block device hardware sector size */
314 return put_int(arg, bdev_hardsect_size(bdev)); 314 return put_int(arg, bdev_logical_block_size(bdev));
315 case BLKSECTGET: 315 case BLKSECTGET:
316 return put_ushort(arg, bdev_get_queue(bdev)->max_sectors); 316 return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
317 case BLKRASET: 317 case BLKRASET:
318 case BLKFRASET: 318 case BLKFRASET:
319 if(!capable(CAP_SYS_ADMIN)) 319 if(!capable(CAP_SYS_ADMIN))
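On the ioctl side, BLKSSZGET now answers with bdev_logical_block_size() and BLKSECTGET with queue_max_sectors(); the user-visible interface is unchanged. A minimal caller of BLKSSZGET, with the device path as an assumption and read access to the block device required:

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device node; any readable block device will do. */
	int fd = open("/dev/sda", O_RDONLY);
	int lbs = 0;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKSSZGET, &lbs) == 0)
		printf("logical block size: %d bytes\n", lbs);
	else
		perror("BLKSSZGET");
	close(fd);
	return 0;
}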
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 82a0ca2f6729..5f8e798ede4e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
75 75
76static int sg_get_reserved_size(struct request_queue *q, int __user *p) 76static int sg_get_reserved_size(struct request_queue *q, int __user *p)
77{ 77{
78 unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); 78 unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
79 79
80 return put_user(val, p); 80 return put_user(val, p);
81} 81}
@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
89 89
90 if (size < 0) 90 if (size < 0)
91 return -EINVAL; 91 return -EINVAL;
92 if (size > (q->max_sectors << 9)) 92 if (size > (queue_max_sectors(q) << 9))
93 size = q->max_sectors << 9; 93 size = queue_max_sectors(q) << 9;
94 94
95 q->sg_reserved_size = size; 95 q->sg_reserved_size = size;
96 return 0; 96 return 0;
@@ -230,7 +230,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
230 hdr->info = 0; 230 hdr->info = 0;
231 if (hdr->masked_status || hdr->host_status || hdr->driver_status) 231 if (hdr->masked_status || hdr->host_status || hdr->driver_status)
232 hdr->info |= SG_INFO_CHECK; 232 hdr->info |= SG_INFO_CHECK;
233 hdr->resid = rq->data_len; 233 hdr->resid = rq->resid_len;
234 hdr->sb_len_wr = 0; 234 hdr->sb_len_wr = 0;
235 235
236 if (rq->sense_len && hdr->sbp) { 236 if (rq->sense_len && hdr->sbp) {
@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
264 if (hdr->cmd_len > BLK_MAX_CDB) 264 if (hdr->cmd_len > BLK_MAX_CDB)
265 return -EINVAL; 265 return -EINVAL;
266 266
267 if (hdr->dxfer_len > (q->max_hw_sectors << 9)) 267 if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
268 return -EIO; 268 return -EIO;
269 269
270 if (hdr->dxfer_len) 270 if (hdr->dxfer_len)
@@ -500,9 +500,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
500 500
501 rq = blk_get_request(q, WRITE, __GFP_WAIT); 501 rq = blk_get_request(q, WRITE, __GFP_WAIT);
502 rq->cmd_type = REQ_TYPE_BLOCK_PC; 502 rq->cmd_type = REQ_TYPE_BLOCK_PC;
503 rq->data = NULL;
504 rq->data_len = 0;
505 rq->extra_len = 0;
506 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 503 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
507 rq->cmd[0] = cmd; 504 rq->cmd[0] = cmd;
508 rq->cmd[4] = data; 505 rq->cmd[4] = data;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 342316064e9f..d0dfeef55db5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
1084 if (likely(!blk_pc_request(rq))) 1084 if (likely(!blk_pc_request(rq)))
1085 return 0; 1085 return 0;
1086 1086
1087 if (!rq->data_len || (rq->cmd_flags & REQ_RW)) 1087 if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
1088 return 0; 1088 return 0;
1089 1089
1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; 1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f22ed6cc69f2..668dc234b8e2 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
3321 DAC960_Command_T *Command; 3321 DAC960_Command_T *Command;
3322 3322
3323 while(1) { 3323 while(1) {
3324 Request = elv_next_request(req_q); 3324 Request = blk_peek_request(req_q);
3325 if (!Request) 3325 if (!Request)
3326 return 1; 3326 return 1;
3327 3327
@@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
3338 } 3338 }
3339 Command->Completion = Request->end_io_data; 3339 Command->Completion = Request->end_io_data;
3340 Command->LogicalDriveNumber = (long)Request->rq_disk->private_data; 3340 Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
3341 Command->BlockNumber = Request->sector; 3341 Command->BlockNumber = blk_rq_pos(Request);
3342 Command->BlockCount = Request->nr_sectors; 3342 Command->BlockCount = blk_rq_sectors(Request);
3343 Command->Request = Request; 3343 Command->Request = Request;
3344 blkdev_dequeue_request(Request); 3344 blk_start_request(Request);
3345 Command->SegmentCount = blk_rq_map_sg(req_q, 3345 Command->SegmentCount = blk_rq_map_sg(req_q,
3346 Command->Request, Command->cmd_sglist); 3346 Command->Request, Command->cmd_sglist);
3347 /* pci_map_sg MAY change the value of SegCount */ 3347 /* pci_map_sg MAY change the value of SegCount */
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
3431 * successfully as possible. 3431 * successfully as possible.
3432 */ 3432 */
3433 Command->SegmentCount = 1; 3433 Command->SegmentCount = 1;
3434 Command->BlockNumber = Request->sector; 3434 Command->BlockNumber = blk_rq_pos(Request);
3435 Command->BlockCount = 1; 3435 Command->BlockCount = 1;
3436 DAC960_QueueReadWriteCommand(Command); 3436 DAC960_QueueReadWriteCommand(Command);
3437 return; 3437 return;
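DAC960 is one of several drivers converted from elv_next_request()/blkdev_dequeue_request() to the new blk_peek_request()/blk_start_request() pair and the blk_rq_pos()/blk_rq_sectors() accessors. The general shape of such a queue-processing loop, sketched with hypothetical stand-in types rather than the real block-layer API:

#include <stdio.h>

/* Toy model of the peek/start pattern: peek looks at the head of the
 * queue without removing it, start dequeues it once the driver has the
 * resources to issue it. */
struct toy_rq { unsigned long pos; unsigned sectors; };

static struct toy_rq queue[] = { { 0, 8 }, { 8, 16 }, { 64, 8 } };
static unsigned head, tail = 3;

static struct toy_rq *toy_peek_request(void)
{
	return head < tail ? &queue[head] : NULL;
}

static void toy_start_request(struct toy_rq *rq)
{
	(void)rq;
	head++;		/* remove it from the queue; it is now "in flight" */
}

int main(void)
{
	struct toy_rq *rq;

	while ((rq = toy_peek_request()) != NULL) {
		/* A real driver would bail out here if it has no free command. */
		toy_start_request(rq);
		printf("issue: pos=%lu sectors=%u\n", rq->pos, rq->sectors);
	}
	return 0;
}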
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ddea8e485cc9..f42fa50d3550 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -412,7 +412,7 @@ config ATA_OVER_ETH
412 412
413config MG_DISK 413config MG_DISK
414 tristate "mGine mflash, gflash support" 414 tristate "mGine mflash, gflash support"
415 depends on ARM && ATA && GPIOLIB 415 depends on ARM && GPIOLIB
416 help 416 help
417 mGine mFlash(gFlash) block device driver 417 mGine mFlash(gFlash) block device driver
418 418
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8df436ff7068..9c6e5b0fe894 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
112MODULE_LICENSE("GPL"); 112MODULE_LICENSE("GPL");
113 113
114static struct request_queue *floppy_queue; 114static struct request_queue *floppy_queue;
115#define QUEUE (floppy_queue)
116#define CURRENT elv_next_request(floppy_queue)
117 115
118/* 116/*
119 * Macros 117 * Macros
@@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
1335 1333
1336static void redo_fd_request(void) 1334static void redo_fd_request(void)
1337{ 1335{
1336 struct request *rq;
1338 unsigned int cnt, block, track, sector; 1337 unsigned int cnt, block, track, sector;
1339 int drive; 1338 int drive;
1340 struct amiga_floppy_struct *floppy; 1339 struct amiga_floppy_struct *floppy;
1341 char *data; 1340 char *data;
1342 unsigned long flags; 1341 unsigned long flags;
1342 int err;
1343 1343
1344 repeat: 1344next_req:
1345 if (!CURRENT) { 1345 rq = blk_fetch_request(floppy_queue);
1346 if (!rq) {
1346 /* Nothing left to do */ 1347 /* Nothing left to do */
1347 return; 1348 return;
1348 } 1349 }
1349 1350
1350 floppy = CURRENT->rq_disk->private_data; 1351 floppy = rq->rq_disk->private_data;
1351 drive = floppy - unit; 1352 drive = floppy - unit;
1352 1353
1354next_segment:
1353 /* Here someone could investigate to be more efficient */ 1355 /* Here someone could investigate to be more efficient */
1354 for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) { 1356 for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
1355#ifdef DEBUG 1357#ifdef DEBUG
1356 printk("fd: sector %ld + %d requested for %s\n", 1358 printk("fd: sector %ld + %d requested for %s\n",
1357 CURRENT->sector,cnt, 1359 blk_rq_pos(rq), cnt,
1358 (rq_data_dir(CURRENT) == READ) ? "read" : "write"); 1360 (rq_data_dir(rq) == READ) ? "read" : "write");
1359#endif 1361#endif
1360 block = CURRENT->sector + cnt; 1362 block = blk_rq_pos(rq) + cnt;
1361 if ((int)block > floppy->blocks) { 1363 if ((int)block > floppy->blocks) {
1362 end_request(CURRENT, 0); 1364 err = -EIO;
1363 goto repeat; 1365 break;
1364 } 1366 }
1365 1367
1366 track = block / (floppy->dtype->sects * floppy->type->sect_mult); 1368 track = block / (floppy->dtype->sects * floppy->type->sect_mult);
1367 sector = block % (floppy->dtype->sects * floppy->type->sect_mult); 1369 sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
1368 data = CURRENT->buffer + 512 * cnt; 1370 data = rq->buffer + 512 * cnt;
1369#ifdef DEBUG 1371#ifdef DEBUG
1370 printk("access to track %d, sector %d, with buffer at " 1372 printk("access to track %d, sector %d, with buffer at "
1371 "0x%08lx\n", track, sector, data); 1373 "0x%08lx\n", track, sector, data);
1372#endif 1374#endif
1373 1375
1374 if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
1375 printk(KERN_WARNING "do_fd_request: unknown command\n");
1376 end_request(CURRENT, 0);
1377 goto repeat;
1378 }
1379 if (get_track(drive, track) == -1) { 1376 if (get_track(drive, track) == -1) {
1380 end_request(CURRENT, 0); 1377 err = -EIO;
1381 goto repeat; 1378 break;
1382 } 1379 }
1383 1380
1384 switch (rq_data_dir(CURRENT)) { 1381 if (rq_data_dir(rq) == READ) {
1385 case READ:
1386 memcpy(data, floppy->trackbuf + sector * 512, 512); 1382 memcpy(data, floppy->trackbuf + sector * 512, 512);
1387 break; 1383 } else {
1388
1389 case WRITE:
1390 memcpy(floppy->trackbuf + sector * 512, data, 512); 1384 memcpy(floppy->trackbuf + sector * 512, data, 512);
1391 1385
1392 /* keep the drive spinning while writes are scheduled */ 1386 /* keep the drive spinning while writes are scheduled */
1393 if (!fd_motor_on(drive)) { 1387 if (!fd_motor_on(drive)) {
1394 end_request(CURRENT, 0); 1388 err = -EIO;
1395 goto repeat; 1389 break;
1396 } 1390 }
1397 /* 1391 /*
1398 * setup a callback to write the track buffer 1392 * setup a callback to write the track buffer
@@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
1404 /* reset the timer */ 1398 /* reset the timer */
1405 mod_timer (flush_track_timer + drive, jiffies + 1); 1399 mod_timer (flush_track_timer + drive, jiffies + 1);
1406 local_irq_restore(flags); 1400 local_irq_restore(flags);
1407 break;
1408 } 1401 }
1409 } 1402 }
1410 CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
1411 CURRENT->sector += CURRENT->current_nr_sectors;
1412 1403
1413 end_request(CURRENT, 1); 1404 if (__blk_end_request_cur(rq, err))
1414 goto repeat; 1405 goto next_segment;
1406 goto next_req;
1415} 1407}
1416 1408
1417static void do_fd_request(struct request_queue * q) 1409static void do_fd_request(struct request_queue * q)
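amiflop's redo_fd_request() now drives the whole transfer from blk_fetch_request() and __blk_end_request_cur(): each pass handles the current chunk, and the return value of __blk_end_request_cur() decides whether the same request continues (more left to do) or the next request is fetched. A standalone model of that two-level loop:

#include <stdbool.h>
#include <stdio.h>

/* Toy request: a number of 512-byte segments still to transfer. */
struct toy_rq { int segments; };

static struct toy_rq pending[] = { { 2 }, { 1 } };
static unsigned next_idx, count = 2;

static struct toy_rq *toy_fetch_request(void)
{
	return next_idx < count ? &pending[next_idx++] : NULL;
}

/* Like __blk_end_request_cur(): complete one chunk, return true while
 * the request still has more to do. */
static bool toy_end_request_cur(struct toy_rq *rq)
{
	return --rq->segments > 0;
}

int main(void)
{
	struct toy_rq *rq;

next_req:
	rq = toy_fetch_request();
	if (!rq)
		return 0;

next_segment:
	printf("transfer one segment, %d left after it\n", rq->segments - 1);
	if (toy_end_request_cur(rq))
		goto next_segment;
	goto next_req;
}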
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4234c11c1e4c..f5e7180d7f47 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -79,9 +79,7 @@
79#undef DEBUG 79#undef DEBUG
80 80
81static struct request_queue *floppy_queue; 81static struct request_queue *floppy_queue;
82 82static struct request *fd_request;
83#define QUEUE (floppy_queue)
84#define CURRENT elv_next_request(floppy_queue)
85 83
86/* Disk types: DD, HD, ED */ 84/* Disk types: DD, HD, ED */
87static struct atari_disk_type { 85static struct atari_disk_type {
@@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
376static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); 374static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
377static DEFINE_TIMER(fd_timer, check_change, 0, 0); 375static DEFINE_TIMER(fd_timer, check_change, 0, 0);
378 376
377static void fd_end_request_cur(int err)
378{
379 if (!__blk_end_request_cur(fd_request, err))
380 fd_request = NULL;
381}
382
379static inline void start_motor_off_timer(void) 383static inline void start_motor_off_timer(void)
380{ 384{
381 mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY); 385 mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@@ -606,15 +610,15 @@ static void fd_error( void )
606 return; 610 return;
607 } 611 }
608 612
609 if (!CURRENT) 613 if (!fd_request)
610 return; 614 return;
611 615
612 CURRENT->errors++; 616 fd_request->errors++;
613 if (CURRENT->errors >= MAX_ERRORS) { 617 if (fd_request->errors >= MAX_ERRORS) {
614 printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); 618 printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
615 end_request(CURRENT, 0); 619 fd_end_request_cur(-EIO);
616 } 620 }
617 else if (CURRENT->errors == RECALIBRATE_ERRORS) { 621 else if (fd_request->errors == RECALIBRATE_ERRORS) {
618 printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); 622 printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
619 if (SelectedDrive != -1) 623 if (SelectedDrive != -1)
620 SUD.track = -1; 624 SUD.track = -1;
@@ -725,16 +729,14 @@ static void do_fd_action( int drive )
725 if (IS_BUFFERED( drive, ReqSide, ReqTrack )) { 729 if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
726 if (ReqCmd == READ) { 730 if (ReqCmd == READ) {
727 copy_buffer( SECTOR_BUFFER(ReqSector), ReqData ); 731 copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
728 if (++ReqCnt < CURRENT->current_nr_sectors) { 732 if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
729 /* read next sector */ 733 /* read next sector */
730 setup_req_params( drive ); 734 setup_req_params( drive );
731 goto repeat; 735 goto repeat;
732 } 736 }
733 else { 737 else {
734 /* all sectors finished */ 738 /* all sectors finished */
735 CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 739 fd_end_request_cur(0);
736 CURRENT->sector += CURRENT->current_nr_sectors;
737 end_request(CURRENT, 1);
738 redo_fd_request(); 740 redo_fd_request();
739 return; 741 return;
740 } 742 }
@@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
1132 } 1134 }
1133 } 1135 }
1134 1136
1135 if (++ReqCnt < CURRENT->current_nr_sectors) { 1137 if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
1136 /* read next sector */ 1138 /* read next sector */
1137 setup_req_params( SelectedDrive ); 1139 setup_req_params( SelectedDrive );
1138 do_fd_action( SelectedDrive ); 1140 do_fd_action( SelectedDrive );
1139 } 1141 }
1140 else { 1142 else {
1141 /* all sectors finished */ 1143 /* all sectors finished */
1142 CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 1144 fd_end_request_cur(0);
1143 CURRENT->sector += CURRENT->current_nr_sectors;
1144 end_request(CURRENT, 1);
1145 redo_fd_request(); 1145 redo_fd_request();
1146 } 1146 }
1147 return; 1147 return;
@@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
1382 ReqData = ReqBuffer + 512 * ReqCnt; 1382 ReqData = ReqBuffer + 512 * ReqCnt;
1383 1383
1384 if (UseTrackbuffer) 1384 if (UseTrackbuffer)
1385 read_track = (ReqCmd == READ && CURRENT->errors == 0); 1385 read_track = (ReqCmd == READ && fd_request->errors == 0);
1386 else 1386 else
1387 read_track = 0; 1387 read_track = 0;
1388 1388
@@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
1396 int drive, type; 1396 int drive, type;
1397 struct atari_floppy_struct *floppy; 1397 struct atari_floppy_struct *floppy;
1398 1398
1399 DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n", 1399 DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
1400 CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "", 1400 fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
1401 CURRENT ? CURRENT->sector : 0 )); 1401 fd_request ? blk_rq_pos(fd_request) : 0 ));
1402 1402
1403 IsFormatting = 0; 1403 IsFormatting = 0;
1404 1404
1405repeat: 1405repeat:
1406 if (!fd_request) {
1407 fd_request = blk_fetch_request(floppy_queue);
1408 if (!fd_request)
1409 goto the_end;
1410 }
1406 1411
1407 if (!CURRENT) 1412 floppy = fd_request->rq_disk->private_data;
1408 goto the_end;
1409
1410 floppy = CURRENT->rq_disk->private_data;
1411 drive = floppy - unit; 1413 drive = floppy - unit;
1412 type = floppy->type; 1414 type = floppy->type;
1413 1415
1414 if (!UD.connected) { 1416 if (!UD.connected) {
1415 /* drive not connected */ 1417 /* drive not connected */
1416 printk(KERN_ERR "Unknown Device: fd%d\n", drive ); 1418 printk(KERN_ERR "Unknown Device: fd%d\n", drive );
1417 end_request(CURRENT, 0); 1419 fd_end_request_cur(-EIO);
1418 goto repeat; 1420 goto repeat;
1419 } 1421 }
1420 1422
@@ -1430,12 +1432,12 @@ repeat:
1430 /* user supplied disk type */ 1432 /* user supplied disk type */
1431 if (--type >= NUM_DISK_MINORS) { 1433 if (--type >= NUM_DISK_MINORS) {
1432 printk(KERN_WARNING "fd%d: invalid disk format", drive ); 1434 printk(KERN_WARNING "fd%d: invalid disk format", drive );
1433 end_request(CURRENT, 0); 1435 fd_end_request_cur(-EIO);
1434 goto repeat; 1436 goto repeat;
1435 } 1437 }
1436 if (minor2disktype[type].drive_types > DriveType) { 1438 if (minor2disktype[type].drive_types > DriveType) {
1437 printk(KERN_WARNING "fd%d: unsupported disk format", drive ); 1439 printk(KERN_WARNING "fd%d: unsupported disk format", drive );
1438 end_request(CURRENT, 0); 1440 fd_end_request_cur(-EIO);
1439 goto repeat; 1441 goto repeat;
1440 } 1442 }
1441 type = minor2disktype[type].index; 1443 type = minor2disktype[type].index;
@@ -1444,8 +1446,8 @@ repeat:
1444 UD.autoprobe = 0; 1446 UD.autoprobe = 0;
1445 } 1447 }
1446 1448
1447 if (CURRENT->sector + 1 > UDT->blocks) { 1449 if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
1448 end_request(CURRENT, 0); 1450 fd_end_request_cur(-EIO);
1449 goto repeat; 1451 goto repeat;
1450 } 1452 }
1451 1453
@@ -1453,9 +1455,9 @@ repeat:
1453 del_timer( &motor_off_timer ); 1455 del_timer( &motor_off_timer );
1454 1456
1455 ReqCnt = 0; 1457 ReqCnt = 0;
1456 ReqCmd = rq_data_dir(CURRENT); 1458 ReqCmd = rq_data_dir(fd_request);
1457 ReqBlock = CURRENT->sector; 1459 ReqBlock = blk_rq_pos(fd_request);
1458 ReqBuffer = CURRENT->buffer; 1460 ReqBuffer = fd_request->buffer;
1459 setup_req_params( drive ); 1461 setup_req_params( drive );
1460 do_fd_action( drive ); 1462 do_fd_action( drive );
1461 1463
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 5f7e64ba87e5..4bf8705b3ace 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -407,12 +407,7 @@ static int __init ramdisk_size(char *str)
407 rd_size = simple_strtol(str, NULL, 0); 407 rd_size = simple_strtol(str, NULL, 0);
408 return 1; 408 return 1;
409} 409}
410static int __init ramdisk_size2(char *str) 410__setup("ramdisk_size=", ramdisk_size);
411{
412 return ramdisk_size(str);
413}
414__setup("ramdisk=", ramdisk_size);
415__setup("ramdisk_size=", ramdisk_size2);
416#endif 411#endif
417 412
418/* 413/*
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4d4d5e0d3fa6..b22cec97ea19 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -180,11 +180,13 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
180 __u32); 180 __u32);
181static void start_io(ctlr_info_t *h); 181static void start_io(ctlr_info_t *h);
182static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, 182static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
183 unsigned int use_unit_num, unsigned int log_unit,
184 __u8 page_code, unsigned char *scsi3addr, int cmd_type); 183 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
185static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, 184static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
186 unsigned int use_unit_num, unsigned int log_unit, 185 __u8 page_code, unsigned char scsi3addr[],
187 __u8 page_code, int cmd_type); 186 int cmd_type);
187static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
188 int attempt_retry);
189static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
188 190
189static void fail_all_cmds(unsigned long ctlr); 191static void fail_all_cmds(unsigned long ctlr);
190static int scan_thread(void *data); 192static int scan_thread(void *data);
@@ -437,6 +439,194 @@ static void __devinit cciss_procinit(int i)
437} 439}
438#endif /* CONFIG_PROC_FS */ 440#endif /* CONFIG_PROC_FS */
439 441
442#define MAX_PRODUCT_NAME_LEN 19
443
444#define to_hba(n) container_of(n, struct ctlr_info, dev)
445#define to_drv(n) container_of(n, drive_info_struct, dev)
446
447static struct device_type cciss_host_type = {
448 .name = "cciss_host",
449};
450
451static ssize_t dev_show_unique_id(struct device *dev,
452 struct device_attribute *attr,
453 char *buf)
454{
455 drive_info_struct *drv = to_drv(dev);
456 struct ctlr_info *h = to_hba(drv->dev.parent);
457 __u8 sn[16];
458 unsigned long flags;
459 int ret = 0;
460
461 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
462 if (h->busy_configuring)
463 ret = -EBUSY;
464 else
465 memcpy(sn, drv->serial_no, sizeof(sn));
466 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
467
468 if (ret)
469 return ret;
470 else
471 return snprintf(buf, 16 * 2 + 2,
472 "%02X%02X%02X%02X%02X%02X%02X%02X"
473 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
474 sn[0], sn[1], sn[2], sn[3],
475 sn[4], sn[5], sn[6], sn[7],
476 sn[8], sn[9], sn[10], sn[11],
477 sn[12], sn[13], sn[14], sn[15]);
478}
479DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
480
481static ssize_t dev_show_vendor(struct device *dev,
482 struct device_attribute *attr,
483 char *buf)
484{
485 drive_info_struct *drv = to_drv(dev);
486 struct ctlr_info *h = to_hba(drv->dev.parent);
487 char vendor[VENDOR_LEN + 1];
488 unsigned long flags;
489 int ret = 0;
490
491 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
492 if (h->busy_configuring)
493 ret = -EBUSY;
494 else
495 memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
496 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
497
498 if (ret)
499 return ret;
500 else
501 return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
502}
503DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
504
505static ssize_t dev_show_model(struct device *dev,
506 struct device_attribute *attr,
507 char *buf)
508{
509 drive_info_struct *drv = to_drv(dev);
510 struct ctlr_info *h = to_hba(drv->dev.parent);
511 char model[MODEL_LEN + 1];
512 unsigned long flags;
513 int ret = 0;
514
515 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
516 if (h->busy_configuring)
517 ret = -EBUSY;
518 else
519 memcpy(model, drv->model, MODEL_LEN + 1);
520 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
521
522 if (ret)
523 return ret;
524 else
525 return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
526}
527DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
528
529static ssize_t dev_show_rev(struct device *dev,
530 struct device_attribute *attr,
531 char *buf)
532{
533 drive_info_struct *drv = to_drv(dev);
534 struct ctlr_info *h = to_hba(drv->dev.parent);
535 char rev[REV_LEN + 1];
536 unsigned long flags;
537 int ret = 0;
538
539 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
540 if (h->busy_configuring)
541 ret = -EBUSY;
542 else
543 memcpy(rev, drv->rev, REV_LEN + 1);
544 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
545
546 if (ret)
547 return ret;
548 else
549 return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
550}
551DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
552
553static struct attribute *cciss_dev_attrs[] = {
554 &dev_attr_unique_id.attr,
555 &dev_attr_model.attr,
556 &dev_attr_vendor.attr,
557 &dev_attr_rev.attr,
558 NULL
559};
560
561static struct attribute_group cciss_dev_attr_group = {
562 .attrs = cciss_dev_attrs,
563};
564
565static struct attribute_group *cciss_dev_attr_groups[] = {
566 &cciss_dev_attr_group,
567 NULL
568};
569
570static struct device_type cciss_dev_type = {
571 .name = "cciss_device",
572 .groups = cciss_dev_attr_groups,
573};
574
575static struct bus_type cciss_bus_type = {
576 .name = "cciss",
577};
578
579
580/*
581 * Initialize sysfs entry for each controller. This sets up and registers
582 * the 'cciss#' directory for each individual controller under
583 * /sys/bus/pci/devices/<dev>/.
584 */
585static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
586{
587 device_initialize(&h->dev);
588 h->dev.type = &cciss_host_type;
589 h->dev.bus = &cciss_bus_type;
590 dev_set_name(&h->dev, "%s", h->devname);
591 h->dev.parent = &h->pdev->dev;
592
593 return device_add(&h->dev);
594}
595
596/*
597 * Remove sysfs entries for an hba.
598 */
599static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
600{
601 device_del(&h->dev);
602}
603
604/*
605 * Initialize sysfs for each logical drive. This sets up and registers
606 * the 'c#d#' directory for each individual logical drive under
 607 * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from
608 * /sys/block/cciss!c#d# to this entry.
609 */
610static int cciss_create_ld_sysfs_entry(struct ctlr_info *h,
611 drive_info_struct *drv,
612 int drv_index)
613{
614 device_initialize(&drv->dev);
615 drv->dev.type = &cciss_dev_type;
616 drv->dev.bus = &cciss_bus_type;
617 dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index);
618 drv->dev.parent = &h->dev;
619 return device_add(&drv->dev);
620}
621
622/*
623 * Remove sysfs entries for a logical drive.
624 */
625static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv)
626{
627 device_del(&drv->dev);
628}
629
440/* 630/*
441 * For operations that cannot sleep, a command block is allocated at init, 631 * For operations that cannot sleep, a command block is allocated at init,
442 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 632 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
@@ -1299,7 +1489,6 @@ static void cciss_softirq_done(struct request *rq)
1299{ 1489{
1300 CommandList_struct *cmd = rq->completion_data; 1490 CommandList_struct *cmd = rq->completion_data;
1301 ctlr_info_t *h = hba[cmd->ctlr]; 1491 ctlr_info_t *h = hba[cmd->ctlr];
1302 unsigned int nr_bytes;
1303 unsigned long flags; 1492 unsigned long flags;
1304 u64bit temp64; 1493 u64bit temp64;
1305 int i, ddir; 1494 int i, ddir;
@@ -1321,15 +1510,11 @@ static void cciss_softirq_done(struct request *rq)
1321 printk("Done with %p\n", rq); 1510 printk("Done with %p\n", rq);
1322#endif /* CCISS_DEBUG */ 1511#endif /* CCISS_DEBUG */
1323 1512
1324 /* 1513 /* set the residual count for pc requests */
1325 * Store the full size and set the residual count for pc requests
1326 */
1327 nr_bytes = blk_rq_bytes(rq);
1328 if (blk_pc_request(rq)) 1514 if (blk_pc_request(rq))
1329 rq->data_len = cmd->err_info->ResidualCnt; 1515 rq->resid_len = cmd->err_info->ResidualCnt;
1330 1516
1331 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes)) 1517 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
1332 BUG();
1333 1518
1334 spin_lock_irqsave(&h->lock, flags); 1519 spin_lock_irqsave(&h->lock, flags);
1335 cmd_free(h, cmd, 1); 1520 cmd_free(h, cmd, 1);
@@ -1337,6 +1522,56 @@ static void cciss_softirq_done(struct request *rq)
1337 spin_unlock_irqrestore(&h->lock, flags); 1522 spin_unlock_irqrestore(&h->lock, flags);
1338} 1523}
1339 1524
1525static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[],
1526 uint32_t log_unit)
1527{
1528 log_unit = h->drv[log_unit].LunID & 0x03fff;
1529 memset(&scsi3addr[4], 0, 4);
1530 memcpy(&scsi3addr[0], &log_unit, 4);
1531 scsi3addr[3] |= 0x40;
1532}
1533
1534/* This function gets the SCSI vendor, model, and revision of a logical drive
1535 * via the inquiry page 0. Model, vendor, and rev are set to empty strings if
1536 * they cannot be read.
1537 */
1538static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
1539 char *vendor, char *model, char *rev)
1540{
1541 int rc;
1542 InquiryData_struct *inq_buf;
1543 unsigned char scsi3addr[8];
1544
1545 *vendor = '\0';
1546 *model = '\0';
1547 *rev = '\0';
1548
1549 inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1550 if (!inq_buf)
1551 return;
1552
1553 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
1554 if (withirq)
1555 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf,
1556 sizeof(InquiryData_struct), 0,
1557 scsi3addr, TYPE_CMD);
1558 else
1559 rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf,
1560 sizeof(InquiryData_struct), 0,
1561 scsi3addr, TYPE_CMD);
1562 if (rc == IO_OK) {
1563 memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
1564 vendor[VENDOR_LEN] = '\0';
1565 memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
1566 model[MODEL_LEN] = '\0';
1567 memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
1568 rev[REV_LEN] = '\0';
1569 }
1570
1571 kfree(inq_buf);
1572 return;
1573}
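The offsets copied above (8, 16, 32) follow the standard SCSI INQUIRY data layout; a hedged sketch of the relevant fields (the struct and its name are illustrative, not types used by the driver):

	/* Byte offsets per the SPC INQUIRY data format. */
	struct example_std_inquiry {
		unsigned char hdr[8];	/* bytes  0-7:  device type, flags, length */
		char vendor_id[8];	/* bytes  8-15: T10 vendor identification  */
		char product_id[16];	/* bytes 16-31: product (model) identification */
		char product_rev[4];	/* bytes 32-35: product revision level     */
	};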
1574
1340/* This function gets the serial number of a logical drive via 1575/* This function gets the serial number of a logical drive via
1341 * inquiry page 0x83. Serial no. is 16 bytes. If the serial 1576 * inquiry page 0x83. Serial no. is 16 bytes. If the serial
1342 * number cannot be had, for whatever reason, 16 bytes of 0xff 1577 * number cannot be had, for whatever reason, 16 bytes of 0xff
@@ -1348,6 +1583,7 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1348#define PAGE_83_INQ_BYTES 64 1583#define PAGE_83_INQ_BYTES 64
1349 int rc; 1584 int rc;
1350 unsigned char *buf; 1585 unsigned char *buf;
1586 unsigned char scsi3addr[8];
1351 1587
1352 if (buflen > 16) 1588 if (buflen > 16)
1353 buflen = 16; 1589 buflen = 16;
@@ -1356,12 +1592,13 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1356 if (!buf) 1592 if (!buf)
1357 return; 1593 return;
1358 memset(serial_no, 0, buflen); 1594 memset(serial_no, 0, buflen);
1595 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
1359 if (withirq) 1596 if (withirq)
1360 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, 1597 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
1361 PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD); 1598 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
1362 else 1599 else
1363 rc = sendcmd(CISS_INQUIRY, ctlr, buf, 1600 rc = sendcmd(CISS_INQUIRY, ctlr, buf,
1364 PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD); 1601 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
1365 if (rc == IO_OK) 1602 if (rc == IO_OK)
1366 memcpy(serial_no, &buf[8], buflen); 1603 memcpy(serial_no, &buf[8], buflen);
1367 kfree(buf); 1604 kfree(buf);
@@ -1377,7 +1614,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1377 disk->first_minor = drv_index << NWD_SHIFT; 1614 disk->first_minor = drv_index << NWD_SHIFT;
1378 disk->fops = &cciss_fops; 1615 disk->fops = &cciss_fops;
1379 disk->private_data = &h->drv[drv_index]; 1616 disk->private_data = &h->drv[drv_index];
1380 disk->driverfs_dev = &h->pdev->dev; 1617 disk->driverfs_dev = &h->drv[drv_index].dev;
1381 1618
1382 /* Set up queue information */ 1619 /* Set up queue information */
1383 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1620 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1394,8 +1631,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1394 1631
1395 disk->queue->queuedata = h; 1632 disk->queue->queuedata = h;
1396 1633
1397 blk_queue_hardsect_size(disk->queue, 1634 blk_queue_logical_block_size(disk->queue,
1398 h->drv[drv_index].block_size); 1635 h->drv[drv_index].block_size);
1399 1636
1400 /* Make sure all queue data is written out before */ 1637 /* Make sure all queue data is written out before */
1401 /* setting h->drv[drv_index].queue, as setting this */ 1638 /* setting h->drv[drv_index].queue, as setting this */
@@ -1468,6 +1705,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1468 drvinfo->block_size = block_size; 1705 drvinfo->block_size = block_size;
1469 drvinfo->nr_blocks = total_size + 1; 1706 drvinfo->nr_blocks = total_size + 1;
1470 1707
1708 cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
1709 drvinfo->model, drvinfo->rev);
1471 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, 1710 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1472 sizeof(drvinfo->serial_no)); 1711 sizeof(drvinfo->serial_no));
1473 1712
@@ -1517,6 +1756,9 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1517 h->drv[drv_index].cylinders = drvinfo->cylinders; 1756 h->drv[drv_index].cylinders = drvinfo->cylinders;
1518 h->drv[drv_index].raid_level = drvinfo->raid_level; 1757 h->drv[drv_index].raid_level = drvinfo->raid_level;
1519 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); 1758 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
1759 memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1);
1760 memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1);
1761 memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1);
1520 1762
1521 ++h->num_luns; 1763 ++h->num_luns;
1522 disk = h->gendisk[drv_index]; 1764 disk = h->gendisk[drv_index];
@@ -1591,6 +1833,8 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1591 } 1833 }
1592 } 1834 }
1593 h->drv[drv_index].LunID = lunid; 1835 h->drv[drv_index].LunID = lunid;
1836 if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index))
1837 goto err_free_disk;
1594 1838
1595 /* Don't need to mark this busy because nobody */ 1839 /* Don't need to mark this busy because nobody */
1596 /* else knows about this disk yet to contend */ 1840 /* else knows about this disk yet to contend */
@@ -1598,6 +1842,11 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1598 h->drv[drv_index].busy_configuring = 0; 1842 h->drv[drv_index].busy_configuring = 0;
1599 wmb(); 1843 wmb();
1600 return drv_index; 1844 return drv_index;
1845
1846err_free_disk:
1847 put_disk(h->gendisk[drv_index]);
1848 h->gendisk[drv_index] = NULL;
1849 return -1;
1601} 1850}
1602 1851
1603/* This is for the special case of a controller which 1852/* This is for the special case of a controller which
@@ -1668,8 +1917,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1668 goto mem_msg; 1917 goto mem_msg;
1669 1918
1670 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, 1919 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1671 sizeof(ReportLunData_struct), 0, 1920 sizeof(ReportLunData_struct),
1672 0, 0, TYPE_CMD); 1921 0, CTLR_LUNID, TYPE_CMD);
1673 1922
1674 if (return_code == IO_OK) 1923 if (return_code == IO_OK)
1675 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); 1924 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
@@ -1718,6 +1967,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1718 h->drv[i].busy_configuring = 1; 1967 h->drv[i].busy_configuring = 1;
1719 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1968 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1720 return_code = deregister_disk(h, i, 1); 1969 return_code = deregister_disk(h, i, 1);
1970 cciss_destroy_ld_sysfs_entry(&h->drv[i]);
1721 h->drv[i].busy_configuring = 0; 1971 h->drv[i].busy_configuring = 0;
1722 } 1972 }
1723 } 1973 }
@@ -1877,11 +2127,9 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
1877 return 0; 2127 return 0;
1878} 2128}
1879 2129
1880static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller, 2130static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1881 1: address logical volume log_unit, 2131 size_t size, __u8 page_code, unsigned char *scsi3addr,
1882 2: periph device address is scsi3addr */ 2132 int cmd_type)
1883 unsigned int log_unit, __u8 page_code,
1884 unsigned char *scsi3addr, int cmd_type)
1885{ 2133{
1886 ctlr_info_t *h = hba[ctlr]; 2134 ctlr_info_t *h = hba[ctlr];
1887 u64bit buff_dma_handle; 2135 u64bit buff_dma_handle;
@@ -1897,27 +2145,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1897 c->Header.SGTotal = 0; 2145 c->Header.SGTotal = 0;
1898 } 2146 }
1899 c->Header.Tag.lower = c->busaddr; 2147 c->Header.Tag.lower = c->busaddr;
2148 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
1900 2149
1901 c->Request.Type.Type = cmd_type; 2150 c->Request.Type.Type = cmd_type;
1902 if (cmd_type == TYPE_CMD) { 2151 if (cmd_type == TYPE_CMD) {
1903 switch (cmd) { 2152 switch (cmd) {
1904 case CISS_INQUIRY: 2153 case CISS_INQUIRY:
1905 /* If the logical unit number is 0 then, this is going
1906 to controller so It's a physical command
1907 mode = 0 target = 0. So we have nothing to write.
1908 otherwise, if use_unit_num == 1,
1909 mode = 1(volume set addressing) target = LUNID
1910 otherwise, if use_unit_num == 2,
1911 mode = 0(periph dev addr) target = scsi3addr */
1912 if (use_unit_num == 1) {
1913 c->Header.LUN.LogDev.VolId =
1914 h->drv[log_unit].LunID;
1915 c->Header.LUN.LogDev.Mode = 1;
1916 } else if (use_unit_num == 2) {
1917 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1918 8);
1919 c->Header.LUN.LogDev.Mode = 0;
1920 }
1921 /* are we trying to read a vital product page */ 2154 /* are we trying to read a vital product page */
1922 if (page_code != 0) { 2155 if (page_code != 0) {
1923 c->Request.CDB[1] = 0x01; 2156 c->Request.CDB[1] = 0x01;
@@ -1947,8 +2180,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1947 break; 2180 break;
1948 2181
1949 case CCISS_READ_CAPACITY: 2182 case CCISS_READ_CAPACITY:
1950 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1951 c->Header.LUN.LogDev.Mode = 1;
1952 c->Request.CDBLen = 10; 2183 c->Request.CDBLen = 10;
1953 c->Request.Type.Attribute = ATTR_SIMPLE; 2184 c->Request.Type.Attribute = ATTR_SIMPLE;
1954 c->Request.Type.Direction = XFER_READ; 2185 c->Request.Type.Direction = XFER_READ;
@@ -1956,8 +2187,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1956 c->Request.CDB[0] = cmd; 2187 c->Request.CDB[0] = cmd;
1957 break; 2188 break;
1958 case CCISS_READ_CAPACITY_16: 2189 case CCISS_READ_CAPACITY_16:
1959 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1960 c->Header.LUN.LogDev.Mode = 1;
1961 c->Request.CDBLen = 16; 2190 c->Request.CDBLen = 16;
1962 c->Request.Type.Attribute = ATTR_SIMPLE; 2191 c->Request.Type.Attribute = ATTR_SIMPLE;
1963 c->Request.Type.Direction = XFER_READ; 2192 c->Request.Type.Direction = XFER_READ;
@@ -1979,6 +2208,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1979 c->Request.CDB[0] = BMIC_WRITE; 2208 c->Request.CDB[0] = BMIC_WRITE;
1980 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 2209 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1981 break; 2210 break;
2211 case TEST_UNIT_READY:
2212 c->Request.CDBLen = 6;
2213 c->Request.Type.Attribute = ATTR_SIMPLE;
2214 c->Request.Type.Direction = XFER_NONE;
2215 c->Request.Timeout = 0;
2216 break;
1982 default: 2217 default:
1983 printk(KERN_WARNING 2218 printk(KERN_WARNING
1984 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd); 2219 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
@@ -1997,13 +2232,13 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1997 memcpy(&c->Request.CDB[4], buff, 8); 2232 memcpy(&c->Request.CDB[4], buff, 8);
1998 break; 2233 break;
1999 case 1: /* RESET message */ 2234 case 1: /* RESET message */
2000 c->Request.CDBLen = 12; 2235 c->Request.CDBLen = 16;
2001 c->Request.Type.Attribute = ATTR_SIMPLE; 2236 c->Request.Type.Attribute = ATTR_SIMPLE;
2002 c->Request.Type.Direction = XFER_WRITE; 2237 c->Request.Type.Direction = XFER_NONE;
2003 c->Request.Timeout = 0; 2238 c->Request.Timeout = 0;
2004 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 2239 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2005 c->Request.CDB[0] = cmd; /* reset */ 2240 c->Request.CDB[0] = cmd; /* reset */
2006 c->Request.CDB[1] = 0x04; /* reset a LUN */ 2241 c->Request.CDB[1] = 0x03; /* reset a target */
2007 break; 2242 break;
2008 case 3: /* No-Op message */ 2243 case 3: /* No-Op message */
2009 c->Request.CDBLen = 1; 2244 c->Request.CDBLen = 1;
@@ -2035,114 +2270,152 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
2035 return status; 2270 return status;
2036} 2271}
2037 2272
2038static int sendcmd_withirq(__u8 cmd, 2273static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
2039 int ctlr,
2040 void *buff,
2041 size_t size,
2042 unsigned int use_unit_num,
2043 unsigned int log_unit, __u8 page_code, int cmd_type)
2044{ 2274{
2045 ctlr_info_t *h = hba[ctlr]; 2275 switch (c->err_info->ScsiStatus) {
2046 CommandList_struct *c; 2276 case SAM_STAT_GOOD:
2277 return IO_OK;
2278 case SAM_STAT_CHECK_CONDITION:
2279 switch (0xf & c->err_info->SenseInfo[2]) {
2280 case 0: return IO_OK; /* no sense */
2281 case 1: return IO_OK; /* recovered error */
2282 default:
2283 printk(KERN_WARNING "cciss%d: cmd 0x%02x "
2284 "check condition, sense key = 0x%02x\n",
2285 h->ctlr, c->Request.CDB[0],
2286 c->err_info->SenseInfo[2]);
2287 }
2288 break;
2289 default:
2290 printk(KERN_WARNING "cciss%d: cmd 0x%02x"
2291 "scsi status = 0x%02x\n", h->ctlr,
2292 c->Request.CDB[0], c->err_info->ScsiStatus);
2293 break;
2294 }
2295 return IO_ERROR;
2296}
2297
2298static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
2299{
2300 int return_status = IO_OK;
2301
2302 if (c->err_info->CommandStatus == CMD_SUCCESS)
2303 return IO_OK;
2304
2305 switch (c->err_info->CommandStatus) {
2306 case CMD_TARGET_STATUS:
2307 return_status = check_target_status(h, c);
2308 break;
2309 case CMD_DATA_UNDERRUN:
2310 case CMD_DATA_OVERRUN:
2311 /* expected for inquiry and report lun commands */
2312 break;
2313 case CMD_INVALID:
2314 printk(KERN_WARNING "cciss: cmd 0x%02x is "
2315 "reported invalid\n", c->Request.CDB[0]);
2316 return_status = IO_ERROR;
2317 break;
2318 case CMD_PROTOCOL_ERR:
2319 printk(KERN_WARNING "cciss: cmd 0x%02x has "
2320 "protocol error \n", c->Request.CDB[0]);
2321 return_status = IO_ERROR;
2322 break;
2323 case CMD_HARDWARE_ERR:
2324 printk(KERN_WARNING "cciss: cmd 0x%02x had "
2325 " hardware error\n", c->Request.CDB[0]);
2326 return_status = IO_ERROR;
2327 break;
2328 case CMD_CONNECTION_LOST:
2329 printk(KERN_WARNING "cciss: cmd 0x%02x had "
2330 "connection lost\n", c->Request.CDB[0]);
2331 return_status = IO_ERROR;
2332 break;
2333 case CMD_ABORTED:
2334 printk(KERN_WARNING "cciss: cmd 0x%02x was "
2335 "aborted\n", c->Request.CDB[0]);
2336 return_status = IO_ERROR;
2337 break;
2338 case CMD_ABORT_FAILED:
2339 printk(KERN_WARNING "cciss: cmd 0x%02x reports "
2340 "abort failed\n", c->Request.CDB[0]);
2341 return_status = IO_ERROR;
2342 break;
2343 case CMD_UNSOLICITED_ABORT:
2344 printk(KERN_WARNING
2345 "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
2346 c->Request.CDB[0]);
2347 return_status = IO_NEEDS_RETRY;
2348 break;
2349 default:
2350 printk(KERN_WARNING "cciss: cmd 0x%02x returned "
2351 "unknown status %x\n", c->Request.CDB[0],
2352 c->err_info->CommandStatus);
2353 return_status = IO_ERROR;
2354 }
2355 return return_status;
2356}
2357
2358static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
2359 int attempt_retry)
2360{
2361 DECLARE_COMPLETION_ONSTACK(wait);
2047 u64bit buff_dma_handle; 2362 u64bit buff_dma_handle;
2048 unsigned long flags; 2363 unsigned long flags;
2049 int return_status; 2364 int return_status = IO_OK;
2050 DECLARE_COMPLETION_ONSTACK(wait);
2051 2365
2052 if ((c = cmd_alloc(h, 0)) == NULL) 2366resend_cmd2:
2053 return -ENOMEM;
2054 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2055 log_unit, page_code, NULL, cmd_type);
2056 if (return_status != IO_OK) {
2057 cmd_free(h, c, 0);
2058 return return_status;
2059 }
2060 resend_cmd2:
2061 c->waiting = &wait; 2367 c->waiting = &wait;
2062
2063 /* Put the request on the tail of the queue and send it */ 2368 /* Put the request on the tail of the queue and send it */
2064 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 2369 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2065 addQ(&h->reqQ, c); 2370 addQ(&h->reqQ, c);
2066 h->Qdepth++; 2371 h->Qdepth++;
2067 start_io(h); 2372 start_io(h);
2068 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 2373 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2069 2374
2070 wait_for_completion(&wait); 2375 wait_for_completion(&wait);
2071 2376
2072 if (c->err_info->CommandStatus != 0) { /* an error has occurred */ 2377 if (c->err_info->CommandStatus == 0 || !attempt_retry)
2073 switch (c->err_info->CommandStatus) { 2378 goto command_done;
2074 case CMD_TARGET_STATUS:
2075 printk(KERN_WARNING "cciss: cmd %p has "
2076 " completed with errors\n", c);
2077 if (c->err_info->ScsiStatus) {
2078 printk(KERN_WARNING "cciss: cmd %p "
2079 "has SCSI Status = %x\n",
2080 c, c->err_info->ScsiStatus);
2081 }
2082 2379
2083 break; 2380 return_status = process_sendcmd_error(h, c);
2084 case CMD_DATA_UNDERRUN: 2381
2085 case CMD_DATA_OVERRUN: 2382 if (return_status == IO_NEEDS_RETRY &&
2086 /* expected for inquire and report lun commands */ 2383 c->retry_count < MAX_CMD_RETRIES) {
2087 break; 2384 printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
2088 case CMD_INVALID: 2385 c->Request.CDB[0]);
2089 printk(KERN_WARNING "cciss: Cmd %p is " 2386 c->retry_count++;
2090 "reported invalid\n", c); 2387 /* erase the old error information */
2091 return_status = IO_ERROR; 2388 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2092 break; 2389 return_status = IO_OK;
2093 case CMD_PROTOCOL_ERR: 2390 INIT_COMPLETION(wait);
2094 printk(KERN_WARNING "cciss: cmd %p has " 2391 goto resend_cmd2;
2095 "protocol error \n", c);
2096 return_status = IO_ERROR;
2097 break;
2098 case CMD_HARDWARE_ERR:
2099 printk(KERN_WARNING "cciss: cmd %p had "
2100 " hardware error\n", c);
2101 return_status = IO_ERROR;
2102 break;
2103 case CMD_CONNECTION_LOST:
2104 printk(KERN_WARNING "cciss: cmd %p had "
2105 "connection lost\n", c);
2106 return_status = IO_ERROR;
2107 break;
2108 case CMD_ABORTED:
2109 printk(KERN_WARNING "cciss: cmd %p was "
2110 "aborted\n", c);
2111 return_status = IO_ERROR;
2112 break;
2113 case CMD_ABORT_FAILED:
2114 printk(KERN_WARNING "cciss: cmd %p reports "
2115 "abort failed\n", c);
2116 return_status = IO_ERROR;
2117 break;
2118 case CMD_UNSOLICITED_ABORT:
2119 printk(KERN_WARNING
2120 "cciss%d: unsolicited abort %p\n", ctlr, c);
2121 if (c->retry_count < MAX_CMD_RETRIES) {
2122 printk(KERN_WARNING
2123 "cciss%d: retrying %p\n", ctlr, c);
2124 c->retry_count++;
2125 /* erase the old error information */
2126 memset(c->err_info, 0,
2127 sizeof(ErrorInfo_struct));
2128 return_status = IO_OK;
2129 INIT_COMPLETION(wait);
2130 goto resend_cmd2;
2131 }
2132 return_status = IO_ERROR;
2133 break;
2134 default:
2135 printk(KERN_WARNING "cciss: cmd %p returned "
2136 "unknown status %x\n", c,
2137 c->err_info->CommandStatus);
2138 return_status = IO_ERROR;
2139 }
2140 } 2392 }
2393
2394command_done:
2141 /* unlock the buffers from DMA */ 2395 /* unlock the buffers from DMA */
2142 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2396 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2143 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 2397 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2144 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, 2398 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2145 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2399 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2400 return return_status;
2401}
2402
2403static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
2404 __u8 page_code, unsigned char scsi3addr[],
2405 int cmd_type)
2406{
2407 ctlr_info_t *h = hba[ctlr];
2408 CommandList_struct *c;
2409 int return_status;
2410
2411 c = cmd_alloc(h, 0);
2412 if (!c)
2413 return -ENOMEM;
2414 return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
2415 scsi3addr, cmd_type);
2416 if (return_status == IO_OK)
2417 return_status = sendcmd_withirq_core(h, c, 1);
2418
2146 cmd_free(h, c, 0); 2419 cmd_free(h, c, 0);
2147 return return_status; 2420 return return_status;
2148} 2421}
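To make the new calling convention concrete, a hedged usage sketch (buffer allocation and error handling trimmed; CTLR_LUNID is the all-zero address added to cciss_cmd.h later in this patch):

	/* Address the controller itself. */
	rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
			     sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);

	/* Address logical volume 'logvol': build its scsi3addr first. */
	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
	rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
			     sizeof(InquiryData_struct), 0xC1, scsi3addr, TYPE_CMD);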
@@ -2155,15 +2428,17 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
2155{ 2428{
2156 int return_code; 2429 int return_code;
2157 unsigned long t; 2430 unsigned long t;
2431 unsigned char scsi3addr[8];
2158 2432
2159 memset(inq_buff, 0, sizeof(InquiryData_struct)); 2433 memset(inq_buff, 0, sizeof(InquiryData_struct));
2434 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
2160 if (withirq) 2435 if (withirq)
2161 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, 2436 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
2162 inq_buff, sizeof(*inq_buff), 1, 2437 inq_buff, sizeof(*inq_buff),
2163 logvol, 0xC1, TYPE_CMD); 2438 0xC1, scsi3addr, TYPE_CMD);
2164 else 2439 else
2165 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff, 2440 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
2166 sizeof(*inq_buff), 1, logvol, 0xC1, NULL, 2441 sizeof(*inq_buff), 0xC1, scsi3addr,
2167 TYPE_CMD); 2442 TYPE_CMD);
2168 if (return_code == IO_OK) { 2443 if (return_code == IO_OK) {
2169 if (inq_buff->data_byte[8] == 0xFF) { 2444 if (inq_buff->data_byte[8] == 0xFF) {
@@ -2204,6 +2479,7 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2204{ 2479{
2205 ReadCapdata_struct *buf; 2480 ReadCapdata_struct *buf;
2206 int return_code; 2481 int return_code;
2482 unsigned char scsi3addr[8];
2207 2483
2208 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); 2484 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2209 if (!buf) { 2485 if (!buf) {
@@ -2211,14 +2487,15 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2211 return; 2487 return;
2212 } 2488 }
2213 2489
2490 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
2214 if (withirq) 2491 if (withirq)
2215 return_code = sendcmd_withirq(CCISS_READ_CAPACITY, 2492 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2216 ctlr, buf, sizeof(ReadCapdata_struct), 2493 ctlr, buf, sizeof(ReadCapdata_struct),
2217 1, logvol, 0, TYPE_CMD); 2494 0, scsi3addr, TYPE_CMD);
2218 else 2495 else
2219 return_code = sendcmd(CCISS_READ_CAPACITY, 2496 return_code = sendcmd(CCISS_READ_CAPACITY,
2220 ctlr, buf, sizeof(ReadCapdata_struct), 2497 ctlr, buf, sizeof(ReadCapdata_struct),
2221 1, logvol, 0, NULL, TYPE_CMD); 2498 0, scsi3addr, TYPE_CMD);
2222 if (return_code == IO_OK) { 2499 if (return_code == IO_OK) {
2223 *total_size = be32_to_cpu(*(__be32 *) buf->total_size); 2500 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2224 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); 2501 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
@@ -2238,6 +2515,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
2238{ 2515{
2239 ReadCapdata_struct_16 *buf; 2516 ReadCapdata_struct_16 *buf;
2240 int return_code; 2517 int return_code;
2518 unsigned char scsi3addr[8];
2241 2519
2242 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); 2520 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2243 if (!buf) { 2521 if (!buf) {
@@ -2245,15 +2523,16 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
2245 return; 2523 return;
2246 } 2524 }
2247 2525
2526 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
2248 if (withirq) { 2527 if (withirq) {
2249 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16, 2528 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2250 ctlr, buf, sizeof(ReadCapdata_struct_16), 2529 ctlr, buf, sizeof(ReadCapdata_struct_16),
2251 1, logvol, 0, TYPE_CMD); 2530 0, scsi3addr, TYPE_CMD);
2252 } 2531 }
2253 else { 2532 else {
2254 return_code = sendcmd(CCISS_READ_CAPACITY_16, 2533 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2255 ctlr, buf, sizeof(ReadCapdata_struct_16), 2534 ctlr, buf, sizeof(ReadCapdata_struct_16),
2256 1, logvol, 0, NULL, TYPE_CMD); 2535 0, scsi3addr, TYPE_CMD);
2257 } 2536 }
2258 if (return_code == IO_OK) { 2537 if (return_code == IO_OK) {
2259 *total_size = be64_to_cpu(*(__be64 *) buf->total_size); 2538 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
@@ -2303,7 +2582,7 @@ static int cciss_revalidate(struct gendisk *disk)
2303 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, 2582 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2304 inq_buff, drv); 2583 inq_buff, drv);
2305 2584
2306 blk_queue_hardsect_size(drv->queue, drv->block_size); 2585 blk_queue_logical_block_size(drv->queue, drv->block_size);
2307 set_capacity(disk, drv->nr_blocks); 2586 set_capacity(disk, drv->nr_blocks);
2308 2587
2309 kfree(inq_buff); 2588 kfree(inq_buff);
@@ -2333,86 +2612,21 @@ static unsigned long pollcomplete(int ctlr)
2333 return 1; 2612 return 1;
2334} 2613}
2335 2614
2336static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete) 2615/* Send command c to controller h and poll for it to complete.
2337{ 2616 * Turns interrupts off on the board. Used at driver init time
2338 /* We get in here if sendcmd() is polling for completions 2617 * and during SCSI error recovery.
2339 and gets some command back that it wasn't expecting --
2340 something other than that which it just sent down.
2341 Ordinarily, that shouldn't happen, but it can happen when
2342 the scsi tape stuff gets into error handling mode, and
2343 starts using sendcmd() to try to abort commands and
2344 reset tape drives. In that case, sendcmd may pick up
2345 completions of commands that were sent to logical drives
2346 through the block i/o system, or cciss ioctls completing, etc.
2347 In that case, we need to save those completions for later
2348 processing by the interrupt handler.
2349 */
2350
2351#ifdef CONFIG_CISS_SCSI_TAPE
2352 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2353
2354 /* If it's not the scsi tape stuff doing error handling, (abort */
2355 /* or reset) then we don't expect anything weird. */
2356 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2357#endif
2358 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2359 "Invalid command list address returned! (%lx)\n",
2360 ctlr, complete);
2361 /* not much we can do. */
2362#ifdef CONFIG_CISS_SCSI_TAPE
2363 return 1;
2364 }
2365
2366 /* We've sent down an abort or reset, but something else
2367 has completed */
2368 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2369 /* Uh oh. No room to save it for later... */
2370 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2371 "reject list overflow, command lost!\n", ctlr);
2372 return 1;
2373 }
2374 /* Save it for later */
2375 srl->complete[srl->ncompletions] = complete;
2376 srl->ncompletions++;
2377#endif
2378 return 0;
2379}
2380
2381/*
2382 * Send a command to the controller, and wait for it to complete.
2383 * Only used at init time.
2384 */ 2618 */
2385static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller, 2619static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
2386 1: address logical volume log_unit,
2387 2: periph device address is scsi3addr */
2388 unsigned int log_unit,
2389 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2390{ 2620{
2391 CommandList_struct *c;
2392 int i; 2621 int i;
2393 unsigned long complete; 2622 unsigned long complete;
2394 ctlr_info_t *info_p = hba[ctlr]; 2623 int status = IO_ERROR;
2395 u64bit buff_dma_handle; 2624 u64bit buff_dma_handle;
2396 int status, done = 0;
2397 2625
2398 if ((c = cmd_alloc(info_p, 1)) == NULL) { 2626resend_cmd1:
2399 printk(KERN_WARNING "cciss: unable to get memory"); 2627
2400 return IO_ERROR; 2628 /* Disable interrupt on the board. */
2401 } 2629 h->access.set_intr_mask(h, CCISS_INTR_OFF);
2402 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2403 log_unit, page_code, scsi3addr, cmd_type);
2404 if (status != IO_OK) {
2405 cmd_free(info_p, c, 1);
2406 return status;
2407 }
2408 resend_cmd1:
2409 /*
2410 * Disable interrupt
2411 */
2412#ifdef CCISS_DEBUG
2413 printk(KERN_DEBUG "cciss: turning intr off\n");
2414#endif /* CCISS_DEBUG */
2415 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2416 2630
2417 /* Make sure there is room in the command FIFO */ 2631 /* Make sure there is room in the command FIFO */
2418 /* Actually it should be completely empty at this time */ 2632 /* Actually it should be completely empty at this time */
@@ -2420,21 +2634,15 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
2420 /* tape side of the driver. */ 2634 /* tape side of the driver. */
2421 for (i = 200000; i > 0; i--) { 2635 for (i = 200000; i > 0; i--) {
2422 /* if fifo isn't full go */ 2636 /* if fifo isn't full go */
2423 if (!(info_p->access.fifo_full(info_p))) { 2637 if (!(h->access.fifo_full(h)))
2424
2425 break; 2638 break;
2426 }
2427 udelay(10); 2639 udelay(10);
2428 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full," 2640 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2429 " waiting!\n", ctlr); 2641 " waiting!\n", h->ctlr);
2430 } 2642 }
2431 /* 2643 h->access.submit_command(h, c); /* Send the cmd */
2432 * Send the cmd
2433 */
2434 info_p->access.submit_command(info_p, c);
2435 done = 0;
2436 do { 2644 do {
2437 complete = pollcomplete(ctlr); 2645 complete = pollcomplete(h->ctlr);
2438 2646
2439#ifdef CCISS_DEBUG 2647#ifdef CCISS_DEBUG
2440 printk(KERN_DEBUG "cciss: command completed\n"); 2648 printk(KERN_DEBUG "cciss: command completed\n");
@@ -2443,97 +2651,102 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
2443 if (complete == 1) { 2651 if (complete == 1) {
2444 printk(KERN_WARNING 2652 printk(KERN_WARNING
2445 "cciss cciss%d: SendCmd Timeout out, " 2653 "cciss cciss%d: SendCmd Timeout out, "
2446 "No command list address returned!\n", ctlr); 2654 "No command list address returned!\n", h->ctlr);
2447 status = IO_ERROR; 2655 status = IO_ERROR;
2448 done = 1;
2449 break; 2656 break;
2450 } 2657 }
2451 2658
2452 /* This will need to change for direct lookup completions */ 2659 /* Make sure it's the command we're expecting. */
2453 if ((complete & CISS_ERROR_BIT) 2660 if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
2454 && (complete & ~CISS_ERROR_BIT) == c->busaddr) { 2661 printk(KERN_WARNING "cciss%d: Unexpected command "
2455 /* if data overrun or underun on Report command 2662 "completion.\n", h->ctlr);
2456 ignore it 2663 continue;
2457 */ 2664 }
2458 if (((c->Request.CDB[0] == CISS_REPORT_LOG) || 2665
2459 (c->Request.CDB[0] == CISS_REPORT_PHYS) || 2666 /* It is our command. If no error, we're done. */
2460 (c->Request.CDB[0] == CISS_INQUIRY)) && 2667 if (!(complete & CISS_ERROR_BIT)) {
2461 ((c->err_info->CommandStatus == 2668 status = IO_OK;
2462 CMD_DATA_OVERRUN) || 2669 break;
2463 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2464 )) {
2465 complete = c->busaddr;
2466 } else {
2467 if (c->err_info->CommandStatus ==
2468 CMD_UNSOLICITED_ABORT) {
2469 printk(KERN_WARNING "cciss%d: "
2470 "unsolicited abort %p\n",
2471 ctlr, c);
2472 if (c->retry_count < MAX_CMD_RETRIES) {
2473 printk(KERN_WARNING
2474 "cciss%d: retrying %p\n",
2475 ctlr, c);
2476 c->retry_count++;
2477 /* erase the old error */
2478 /* information */
2479 memset(c->err_info, 0,
2480 sizeof
2481 (ErrorInfo_struct));
2482 goto resend_cmd1;
2483 } else {
2484 printk(KERN_WARNING
2485 "cciss%d: retried %p too "
2486 "many times\n", ctlr, c);
2487 status = IO_ERROR;
2488 goto cleanup1;
2489 }
2490 } else if (c->err_info->CommandStatus ==
2491 CMD_UNABORTABLE) {
2492 printk(KERN_WARNING
2493 "cciss%d: command could not be aborted.\n",
2494 ctlr);
2495 status = IO_ERROR;
2496 goto cleanup1;
2497 }
2498 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2499 " Error %x \n", ctlr,
2500 c->err_info->CommandStatus);
2501 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2502 " offensive info\n"
2503 " size %x\n num %x value %x\n",
2504 ctlr,
2505 c->err_info->MoreErrInfo.Invalid_Cmd.
2506 offense_size,
2507 c->err_info->MoreErrInfo.Invalid_Cmd.
2508 offense_num,
2509 c->err_info->MoreErrInfo.Invalid_Cmd.
2510 offense_value);
2511 status = IO_ERROR;
2512 goto cleanup1;
2513 }
2514 } 2670 }
2515 /* This will need changing for direct lookup completions */ 2671
2516 if (complete != c->busaddr) { 2672 /* There is an error... */
2517 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) { 2673
2518 BUG(); /* we are pretty much hosed if we get here. */ 2674 /* if data overrun or underun on Report command ignore it */
2675 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2676 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2677 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2678 ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
2679 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
2680 complete = c->busaddr;
2681 status = IO_OK;
2682 break;
2683 }
2684
2685 if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
2686 printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
2687 h->ctlr, c);
2688 if (c->retry_count < MAX_CMD_RETRIES) {
2689 printk(KERN_WARNING "cciss%d: retrying %p\n",
2690 h->ctlr, c);
2691 c->retry_count++;
2692 /* erase the old error information */
2693 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2694 goto resend_cmd1;
2519 } 2695 }
2520 continue; 2696 printk(KERN_WARNING "cciss%d: retried %p too many "
2521 } else 2697 "times\n", h->ctlr, c);
2522 done = 1; 2698 status = IO_ERROR;
2523 } while (!done); 2699 break;
2700 }
2701
2702 if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2703 printk(KERN_WARNING "cciss%d: command could not be "
2704 "aborted.\n", h->ctlr);
2705 status = IO_ERROR;
2706 break;
2707 }
2708
2709 if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
2710 status = check_target_status(h, c);
2711 break;
2712 }
2713
2714 printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
2715 printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
2716 c->Request.CDB[0], c->err_info->CommandStatus);
2717 status = IO_ERROR;
2718 break;
2719
2720 } while (1);
2524 2721
2525 cleanup1:
2526 /* unlock the data buffer from DMA */ 2722 /* unlock the data buffer from DMA */
2527 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2723 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2528 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 2724 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2529 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val, 2725 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2530 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2726 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2531#ifdef CONFIG_CISS_SCSI_TAPE 2727 return status;
2532 /* if we saved some commands for later, process them now. */ 2728}
2533 if (info_p->scsi_rejects.ncompletions > 0) 2729
2534 do_cciss_intr(0, info_p); 2730/*
2535#endif 2731 * Send a command to the controller, and wait for it to complete.
2536 cmd_free(info_p, c, 1); 2732 * Used at init time, and during SCSI error recovery.
2733 */
2734static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
2735 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2736{
2737 CommandList_struct *c;
2738 int status;
2739
2740 c = cmd_alloc(hba[ctlr], 1);
2741 if (!c) {
2742 printk(KERN_WARNING "cciss: unable to get memory");
2743 return IO_ERROR;
2744 }
2745 status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
2746 scsi3addr, cmd_type);
2747 if (status == IO_OK)
2748 status = sendcmd_core(hba[ctlr], c);
2749 cmd_free(hba[ctlr], c, 1);
2537 return status; 2750 return status;
2538} 2751}
2539 2752
@@ -2691,7 +2904,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2691 printk(KERN_WARNING "cciss: cmd %p has" 2904 printk(KERN_WARNING "cciss: cmd %p has"
2692 " completed with data underrun " 2905 " completed with data underrun "
2693 "reported\n", cmd); 2906 "reported\n", cmd);
2694 cmd->rq->data_len = cmd->err_info->ResidualCnt; 2907 cmd->rq->resid_len = cmd->err_info->ResidualCnt;
2695 } 2908 }
2696 break; 2909 break;
2697 case CMD_DATA_OVERRUN: 2910 case CMD_DATA_OVERRUN:
@@ -2806,7 +3019,7 @@ static void do_cciss_request(struct request_queue *q)
2806 goto startio; 3019 goto startio;
2807 3020
2808 queue: 3021 queue:
2809 creq = elv_next_request(q); 3022 creq = blk_peek_request(q);
2810 if (!creq) 3023 if (!creq)
2811 goto startio; 3024 goto startio;
2812 3025
@@ -2815,7 +3028,7 @@ static void do_cciss_request(struct request_queue *q)
2815 if ((c = cmd_alloc(h, 1)) == NULL) 3028 if ((c = cmd_alloc(h, 1)) == NULL)
2816 goto full; 3029 goto full;
2817 3030
2818 blkdev_dequeue_request(creq); 3031 blk_start_request(creq);
2819 3032
2820 spin_unlock_irq(q->queue_lock); 3033 spin_unlock_irq(q->queue_lock);
2821 3034
@@ -2840,10 +3053,10 @@ static void do_cciss_request(struct request_queue *q)
2840 c->Request.Timeout = 0; // Don't time out 3053 c->Request.Timeout = 0; // Don't time out
2841 c->Request.CDB[0] = 3054 c->Request.CDB[0] =
2842 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; 3055 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2843 start_blk = creq->sector; 3056 start_blk = blk_rq_pos(creq);
2844#ifdef CCISS_DEBUG 3057#ifdef CCISS_DEBUG
2845 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector, 3058 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
2846 (int)creq->nr_sectors); 3059 (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
2847#endif /* CCISS_DEBUG */ 3060#endif /* CCISS_DEBUG */
2848 3061
2849 sg_init_table(tmp_sg, MAXSGENTRIES); 3062 sg_init_table(tmp_sg, MAXSGENTRIES);
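These request-handling hunks replace direct struct request field access with the block layer accessor helpers; a hedged summary of the substitutions as they appear in this function (illustrative only):

	/* old interface                      new interface                */
	/* creq = elv_next_request(q);    ->  creq = blk_peek_request(q);  */
	/* blkdev_dequeue_request(creq);  ->  blk_start_request(creq);     */
	/* creq->sector                   ->  blk_rq_pos(creq)             */
	/* creq->nr_sectors               ->  blk_rq_sectors(creq)         */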
@@ -2869,8 +3082,8 @@ static void do_cciss_request(struct request_queue *q)
2869 h->maxSG = seg; 3082 h->maxSG = seg;
2870 3083
2871#ifdef CCISS_DEBUG 3084#ifdef CCISS_DEBUG
2872 printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n", 3085 printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
2873 creq->nr_sectors, seg); 3086 blk_rq_sectors(creq), seg);
2874#endif /* CCISS_DEBUG */ 3087#endif /* CCISS_DEBUG */
2875 3088
2876 c->Header.SGList = c->Header.SGTotal = seg; 3089 c->Header.SGList = c->Header.SGTotal = seg;
@@ -2882,8 +3095,8 @@ static void do_cciss_request(struct request_queue *q)
2882 c->Request.CDB[4] = (start_blk >> 8) & 0xff; 3095 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2883 c->Request.CDB[5] = start_blk & 0xff; 3096 c->Request.CDB[5] = start_blk & 0xff;
2884 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB 3097 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2885 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff; 3098 c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
2886 c->Request.CDB[8] = creq->nr_sectors & 0xff; 3099 c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
2887 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; 3100 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2888 } else { 3101 } else {
2889 u32 upper32 = upper_32_bits(start_blk); 3102 u32 upper32 = upper_32_bits(start_blk);
@@ -2898,10 +3111,10 @@ static void do_cciss_request(struct request_queue *q)
2898 c->Request.CDB[7]= (start_blk >> 16) & 0xff; 3111 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2899 c->Request.CDB[8]= (start_blk >> 8) & 0xff; 3112 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2900 c->Request.CDB[9]= start_blk & 0xff; 3113 c->Request.CDB[9]= start_blk & 0xff;
2901 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff; 3114 c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
2902 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff; 3115 c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
2903 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff; 3116 c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
2904 c->Request.CDB[13]= creq->nr_sectors & 0xff; 3117 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
2905 c->Request.CDB[14] = c->Request.CDB[15] = 0; 3118 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2906 } 3119 }
2907 } else if (blk_pc_request(creq)) { 3120 } else if (blk_pc_request(creq)) {
@@ -2931,44 +3144,18 @@ startio:
2931 3144
2932static inline unsigned long get_next_completion(ctlr_info_t *h) 3145static inline unsigned long get_next_completion(ctlr_info_t *h)
2933{ 3146{
2934#ifdef CONFIG_CISS_SCSI_TAPE
2935 /* Any rejects from sendcmd() lying around? Process them first */
2936 if (h->scsi_rejects.ncompletions == 0)
2937 return h->access.command_completed(h);
2938 else {
2939 struct sendcmd_reject_list *srl;
2940 int n;
2941 srl = &h->scsi_rejects;
2942 n = --srl->ncompletions;
2943 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2944 printk("p");
2945 return srl->complete[n];
2946 }
2947#else
2948 return h->access.command_completed(h); 3147 return h->access.command_completed(h);
2949#endif
2950} 3148}
2951 3149
2952static inline int interrupt_pending(ctlr_info_t *h) 3150static inline int interrupt_pending(ctlr_info_t *h)
2953{ 3151{
2954#ifdef CONFIG_CISS_SCSI_TAPE
2955 return (h->access.intr_pending(h)
2956 || (h->scsi_rejects.ncompletions > 0));
2957#else
2958 return h->access.intr_pending(h); 3152 return h->access.intr_pending(h);
2959#endif
2960} 3153}
2961 3154
2962static inline long interrupt_not_for_us(ctlr_info_t *h) 3155static inline long interrupt_not_for_us(ctlr_info_t *h)
2963{ 3156{
2964#ifdef CONFIG_CISS_SCSI_TAPE
2965 return (((h->access.intr_pending(h) == 0) ||
2966 (h->interrupts_enabled == 0))
2967 && (h->scsi_rejects.ncompletions == 0));
2968#else
2969 return (((h->access.intr_pending(h) == 0) || 3157 return (((h->access.intr_pending(h) == 0) ||
2970 (h->interrupts_enabled == 0))); 3158 (h->interrupts_enabled == 0)));
2971#endif
2972} 3159}
2973 3160
2974static irqreturn_t do_cciss_intr(int irq, void *dev_id) 3161static irqreturn_t do_cciss_intr(int irq, void *dev_id)
@@ -3723,12 +3910,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3723 INIT_HLIST_HEAD(&hba[i]->reqQ); 3910 INIT_HLIST_HEAD(&hba[i]->reqQ);
3724 3911
3725 if (cciss_pci_init(hba[i], pdev) != 0) 3912 if (cciss_pci_init(hba[i], pdev) != 0)
3726 goto clean1; 3913 goto clean0;
3727 3914
3728 sprintf(hba[i]->devname, "cciss%d", i); 3915 sprintf(hba[i]->devname, "cciss%d", i);
3729 hba[i]->ctlr = i; 3916 hba[i]->ctlr = i;
3730 hba[i]->pdev = pdev; 3917 hba[i]->pdev = pdev;
3731 3918
3919 if (cciss_create_hba_sysfs_entry(hba[i]))
3920 goto clean0;
3921
3732 /* configure PCI DMA stuff */ 3922 /* configure PCI DMA stuff */
3733 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 3923 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
3734 dac = 1; 3924 dac = 1;
@@ -3787,15 +3977,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3787 printk(KERN_ERR "cciss: out of memory"); 3977 printk(KERN_ERR "cciss: out of memory");
3788 goto clean4; 3978 goto clean4;
3789 } 3979 }
3790#ifdef CONFIG_CISS_SCSI_TAPE
3791 hba[i]->scsi_rejects.complete =
3792 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3793 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3794 if (hba[i]->scsi_rejects.complete == NULL) {
3795 printk(KERN_ERR "cciss: out of memory");
3796 goto clean4;
3797 }
3798#endif
3799 spin_lock_init(&hba[i]->lock); 3980 spin_lock_init(&hba[i]->lock);
3800 3981
3801 /* Initialize the pdev driver private data. 3982 /* Initialize the pdev driver private data.
@@ -3828,7 +4009,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3828 } 4009 }
3829 4010
3830 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff, 4011 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
3831 sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD); 4012 sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
3832 if (return_code == IO_OK) { 4013 if (return_code == IO_OK) {
3833 hba[i]->firm_ver[0] = inq_buff->data_byte[32]; 4014 hba[i]->firm_ver[0] = inq_buff->data_byte[32];
3834 hba[i]->firm_ver[1] = inq_buff->data_byte[33]; 4015 hba[i]->firm_ver[1] = inq_buff->data_byte[33];
@@ -3855,9 +4036,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3855 4036
3856clean4: 4037clean4:
3857 kfree(inq_buff); 4038 kfree(inq_buff);
3858#ifdef CONFIG_CISS_SCSI_TAPE
3859 kfree(hba[i]->scsi_rejects.complete);
3860#endif
3861 kfree(hba[i]->cmd_pool_bits); 4039 kfree(hba[i]->cmd_pool_bits);
3862 if (hba[i]->cmd_pool) 4040 if (hba[i]->cmd_pool)
3863 pci_free_consistent(hba[i]->pdev, 4041 pci_free_consistent(hba[i]->pdev,
@@ -3872,6 +4050,8 @@ clean4:
3872clean2: 4050clean2:
3873 unregister_blkdev(hba[i]->major, hba[i]->devname); 4051 unregister_blkdev(hba[i]->major, hba[i]->devname);
3874clean1: 4052clean1:
4053 cciss_destroy_hba_sysfs_entry(hba[i]);
4054clean0:
3875 hba[i]->busy_initializing = 0; 4055 hba[i]->busy_initializing = 0;
3876 /* cleanup any queues that may have been initialized */ 4056 /* cleanup any queues that may have been initialized */
3877 for (j=0; j <= hba[i]->highest_lun; j++){ 4057 for (j=0; j <= hba[i]->highest_lun; j++){
@@ -3907,8 +4087,8 @@ static void cciss_shutdown(struct pci_dev *pdev)
3907 /* sendcmd will turn off interrupt, and send the flush... 4087 /* sendcmd will turn off interrupt, and send the flush...
3908 * To write all data in the battery backed cache to disks */ 4088 * To write all data in the battery backed cache to disks */
3909 memset(flush_buf, 0, 4); 4089 memset(flush_buf, 0, 4);
3910 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, 4090 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0,
3911 TYPE_CMD); 4091 CTLR_LUNID, TYPE_CMD);
3912 if (return_code == IO_OK) { 4092 if (return_code == IO_OK) {
3913 printk(KERN_INFO "Completed flushing cache on controller %d\n", i); 4093 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3914 } else { 4094 } else {
@@ -3973,15 +4153,13 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
3973 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), 4153 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3974 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); 4154 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3975 kfree(hba[i]->cmd_pool_bits); 4155 kfree(hba[i]->cmd_pool_bits);
3976#ifdef CONFIG_CISS_SCSI_TAPE
3977 kfree(hba[i]->scsi_rejects.complete);
3978#endif
3979 /* 4156 /*
3980 * Deliberately omit pci_disable_device(): it does something nasty to 4157 * Deliberately omit pci_disable_device(): it does something nasty to
3981 * Smart Array controllers that pci_enable_device does not undo 4158 * Smart Array controllers that pci_enable_device does not undo
3982 */ 4159 */
3983 pci_release_regions(pdev); 4160 pci_release_regions(pdev);
3984 pci_set_drvdata(pdev, NULL); 4161 pci_set_drvdata(pdev, NULL);
4162 cciss_destroy_hba_sysfs_entry(hba[i]);
3985 free_hba(i); 4163 free_hba(i);
3986} 4164}
3987 4165
@@ -3999,6 +4177,8 @@ static struct pci_driver cciss_pci_driver = {
3999 */ 4177 */
4000static int __init cciss_init(void) 4178static int __init cciss_init(void)
4001{ 4179{
4180 int err;
4181
4002 /* 4182 /*
4003 * The hardware requires that commands are aligned on a 64-bit 4183 * The hardware requires that commands are aligned on a 64-bit
4004 * boundary. Given that we use pci_alloc_consistent() to allocate an 4184 * boundary. Given that we use pci_alloc_consistent() to allocate an
@@ -4008,8 +4188,20 @@ static int __init cciss_init(void)
4008 4188
4009 printk(KERN_INFO DRIVER_NAME "\n"); 4189 printk(KERN_INFO DRIVER_NAME "\n");
4010 4190
4191 err = bus_register(&cciss_bus_type);
4192 if (err)
4193 return err;
4194
4011 /* Register for our PCI devices */ 4195 /* Register for our PCI devices */
4012 return pci_register_driver(&cciss_pci_driver); 4196 err = pci_register_driver(&cciss_pci_driver);
4197 if (err)
4198 goto err_bus_register;
4199
4200 return 0;
4201
4202err_bus_register:
4203 bus_unregister(&cciss_bus_type);
4204 return err;
4013} 4205}
4014 4206
4015static void __exit cciss_cleanup(void) 4207static void __exit cciss_cleanup(void)
@@ -4026,6 +4218,7 @@ static void __exit cciss_cleanup(void)
4026 } 4218 }
4027 } 4219 }
4028 remove_proc_entry("driver/cciss", NULL); 4220 remove_proc_entry("driver/cciss", NULL);
4221 bus_unregister(&cciss_bus_type);
4029} 4222}
4030 4223
4031static void fail_all_cmds(unsigned long ctlr) 4224static void fail_all_cmds(unsigned long ctlr)
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 703e08038fb9..06a5db25b298 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -11,6 +11,11 @@
11 11
12#define IO_OK 0 12#define IO_OK 0
13#define IO_ERROR 1 13#define IO_ERROR 1
14#define IO_NEEDS_RETRY 3
15
16#define VENDOR_LEN 8
17#define MODEL_LEN 16
18#define REV_LEN 4
14 19
15struct ctlr_info; 20struct ctlr_info;
16typedef struct ctlr_info ctlr_info_t; 21typedef struct ctlr_info ctlr_info_t;
@@ -34,23 +39,20 @@ typedef struct _drive_info_struct
34 int cylinders; 39 int cylinders;
35 int raid_level; /* set to -1 to indicate that 40 int raid_level; /* set to -1 to indicate that
36 * the drive is not in use/configured 41 * the drive is not in use/configured
37 */ 42 */
38 int busy_configuring; /*This is set when the drive is being removed 43 int busy_configuring; /* This is set when a drive is being removed
39 *to prevent it from being opened or it's queue 44 * to prevent it from being opened or its
40 *from being started. 45 * queue from being started.
41 */ 46 */
42 __u8 serial_no[16]; /* from inquiry page 0x83, */ 47 struct device dev;
43 /* not necc. null terminated. */ 48 __u8 serial_no[16]; /* from inquiry page 0x83,
49 * not necc. null terminated.
50 */
51 char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
52 char model[MODEL_LEN + 1]; /* SCSI model string */
53 char rev[REV_LEN + 1]; /* SCSI revision string */
44} drive_info_struct; 54} drive_info_struct;
45 55
46#ifdef CONFIG_CISS_SCSI_TAPE
47
48struct sendcmd_reject_list {
49 int ncompletions;
50 unsigned long *complete; /* array of NR_CMDS tags */
51};
52
53#endif
54struct ctlr_info 56struct ctlr_info
55{ 57{
56 int ctlr; 58 int ctlr;
@@ -118,11 +120,11 @@ struct ctlr_info
118 void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ 120 void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
119 /* list of block side commands the scsi error handling sucked up */ 121 /* list of block side commands the scsi error handling sucked up */
120 /* and saved for later processing */ 122 /* and saved for later processing */
121 struct sendcmd_reject_list scsi_rejects;
122#endif 123#endif
123 unsigned char alive; 124 unsigned char alive;
124 struct completion *rescan_wait; 125 struct completion *rescan_wait;
125 struct task_struct *cciss_scan_thread; 126 struct task_struct *cciss_scan_thread;
127 struct device dev;
126}; 128};
127 129
128/* Defining the diffent access_menthods */ 130/* Defining the diffent access_menthods */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 40b1b92dae7f..cd665b00c7c5 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -217,6 +217,8 @@ typedef union _LUNAddr_struct {
217 LogDevAddr_struct LogDev; 217 LogDevAddr_struct LogDev;
218} LUNAddr_struct; 218} LUNAddr_struct;
219 219
220#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
221
220typedef struct _CommandListHeader_struct { 222typedef struct _CommandListHeader_struct {
221 BYTE ReplyQueue; 223 BYTE ReplyQueue;
222 BYTE SGList; 224 BYTE SGList;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index a3fd87b41444..3315268b4ec7 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -44,20 +44,13 @@
44#define CCISS_ABORT_MSG 0x00 44#define CCISS_ABORT_MSG 0x00
45#define CCISS_RESET_MSG 0x01 45#define CCISS_RESET_MSG 0x01
46 46
47/* some prototypes... */ 47static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
48static int sendcmd( 48 size_t size,
49 __u8 cmd, 49 __u8 page_code, unsigned char *scsi3addr,
50 int ctlr,
51 void *buff,
52 size_t size,
53 unsigned int use_unit_num, /* 0: address the controller,
54 1: address logical volume log_unit,
55 2: address is in scsi3addr */
56 unsigned int log_unit,
57 __u8 page_code,
58 unsigned char *scsi3addr,
59 int cmd_type); 50 int cmd_type);
60 51
52static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
53static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
61 54
62static int cciss_scsi_proc_info( 55static int cciss_scsi_proc_info(
63 struct Scsi_Host *sh, 56 struct Scsi_Host *sh,
@@ -1575,6 +1568,75 @@ cciss_seq_tape_report(struct seq_file *seq, int ctlr)
1575 CPQ_TAPE_UNLOCK(ctlr, flags); 1568 CPQ_TAPE_UNLOCK(ctlr, flags);
1576} 1569}
1577 1570
1571static int wait_for_device_to_become_ready(ctlr_info_t *h,
1572 unsigned char lunaddr[])
1573{
1574 int rc;
1575 int count = 0;
1576 int waittime = HZ;
1577 CommandList_struct *c;
1578
1579 c = cmd_alloc(h, 1);
1580 if (!c) {
1581 printk(KERN_WARNING "cciss%d: out of memory in "
1582 "wait_for_device_to_become_ready.\n", h->ctlr);
1583 return IO_ERROR;
1584 }
1585
1586 /* Send test unit ready until device ready, or give up. */
1587 while (count < 20) {
1588
1589 /* Wait for a bit. do this first, because if we send
1590 * the TUR right away, the reset will just abort it.
1591 */
1592 schedule_timeout_uninterruptible(waittime);
1593 count++;
1594
1595 /* Increase wait time with each try, up to a point. */
1596 if (waittime < (HZ * 30))
1597 waittime = waittime * 2;
1598
1599 /* Send the Test Unit Ready */
1600 rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
1601 lunaddr, TYPE_CMD);
1602 if (rc == 0)
1603 rc = sendcmd_withirq_core(h, c, 0);
1604
1605 (void) process_sendcmd_error(h, c);
1606
1607 if (rc != 0)
1608 goto retry_tur;
1609
1610 if (c->err_info->CommandStatus == CMD_SUCCESS)
1611 break;
1612
1613 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
1614 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1615 if (c->err_info->SenseInfo[2] == NO_SENSE)
1616 break;
1617 if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
1618 unsigned char asc;
1619 asc = c->err_info->SenseInfo[12];
1620 check_for_unit_attention(h, c);
1621 if (asc == POWER_OR_RESET)
1622 break;
1623 }
1624 }
1625retry_tur:
1626 printk(KERN_WARNING "cciss%d: Waiting %d secs "
1627 "for device to become ready.\n",
1628 h->ctlr, waittime / HZ);
1629 rc = 1; /* device not ready. */
1630 }
1631
1632 if (rc)
1633 printk("cciss%d: giving up on device.\n", h->ctlr);
1634 else
1635 printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
1636
1637 cmd_free(h, c, 1);
1638 return rc;
1639}
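The loop above is a capped exponential backoff: it sleeps first (so the TUR is not aborted by the reset it follows), doubles the delay each pass until it crosses 30 seconds, and gives up after 20 attempts. A minimal sketch of that schedule in isolation (names invented; device_is_ready() is a hypothetical stand-in for the TEST UNIT READY exchange above):

	int waittime = HZ;			/* start at one second */
	int count;

	for (count = 0; count < 20; count++) {
		schedule_timeout_uninterruptible(waittime);
		if (waittime < HZ * 30)
			waittime *= 2;		/* 1s, 2s, 4s, ... stop doubling past 30s */
		if (device_is_ready())		/* hypothetical */
			break;
	}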
1578 1640
1579/* Need at least one of these error handlers to keep ../scsi/hosts.c from 1641/* Need at least one of these error handlers to keep ../scsi/hosts.c from
1580 * complaining. Doing a host- or bus-reset can't do anything good here. 1642 * complaining. Doing a host- or bus-reset can't do anything good here.
@@ -1591,6 +1653,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
1591{ 1653{
1592 int rc; 1654 int rc;
1593 CommandList_struct *cmd_in_trouble; 1655 CommandList_struct *cmd_in_trouble;
1656 unsigned char lunaddr[8];
1594 ctlr_info_t **c; 1657 ctlr_info_t **c;
1595 int ctlr; 1658 int ctlr;
1596 1659
@@ -1600,19 +1663,15 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
1600 return FAILED; 1663 return FAILED;
1601 ctlr = (*c)->ctlr; 1664 ctlr = (*c)->ctlr;
1602 printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr); 1665 printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
1603
1604 /* find the command that's giving us trouble */ 1666 /* find the command that's giving us trouble */
1605 cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; 1667 cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
1606 if (cmd_in_trouble == NULL) { /* paranoia */ 1668 if (cmd_in_trouble == NULL) /* paranoia */
1607 return FAILED; 1669 return FAILED;
1608 } 1670 memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
1609 /* send a reset to the SCSI LUN which the command was sent to */ 1671 /* send a reset to the SCSI LUN which the command was sent to */
1610 rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0, 1672 rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
1611 (unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0],
1612 TYPE_MSG); 1673 TYPE_MSG);
1613 /* sendcmd turned off interrupts on the board, turn 'em back on. */ 1674 if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
1614 (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
1615 if (rc == 0)
1616 return SUCCESS; 1675 return SUCCESS;
1617 printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); 1676 printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
1618 return FAILED; 1677 return FAILED;
@@ -1622,6 +1681,7 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
1622{ 1681{
1623 int rc; 1682 int rc;
1624 CommandList_struct *cmd_to_abort; 1683 CommandList_struct *cmd_to_abort;
1684 unsigned char lunaddr[8];
1625 ctlr_info_t **c; 1685 ctlr_info_t **c;
1626 int ctlr; 1686 int ctlr;
1627 1687
@@ -1636,12 +1696,9 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
1636 cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; 1696 cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
1637 if (cmd_to_abort == NULL) /* paranoia */ 1697 if (cmd_to_abort == NULL) /* paranoia */
1638 return FAILED; 1698 return FAILED;
1639 rc = sendcmd(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag, 1699 memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
1640 0, 2, 0, 0, 1700 rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
1641 (unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0], 1701 0, 0, lunaddr, TYPE_MSG);
1642 TYPE_MSG);
1643 /* sendcmd turned off interrupts on the board, turn 'em back on. */
1644 (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
1645 if (rc == 0) 1702 if (rc == 0)
1646 return SUCCESS; 1703 return SUCCESS;
1647 return FAILED; 1704 return FAILED;
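The two hunks above convert the cciss SCSI error handlers from sendcmd() to sendcmd_withirq(), which removes the need to re-enable controller interrupts by hand, and the reset handler now only reports SUCCESS once wait_for_device_to_become_ready() sees the LUN answer again. A minimal sketch of that reset flow, condensed from the diff (the function name cciss_reset_and_wait is illustrative, not part of the patch):

static int cciss_reset_and_wait(ctlr_info_t *h, int ctlr,
                                CommandList_struct *cmd_in_trouble)
{
        unsigned char lunaddr[8];
        int rc;

        /* Remember which LUN the failing command was aimed at. */
        memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);

        /* The reset message goes out with interrupts left enabled. */
        rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0,
                             lunaddr, TYPE_MSG);
        if (rc != 0)
                return FAILED;

        /* The reset only counts if the device becomes ready again. */
        if (wait_for_device_to_become_ready(h, lunaddr) != 0)
                return FAILED;

        return SUCCESS;
}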
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index ca268ca11159..44fa2018f6b0 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
474 disk->fops = &ida_fops; 474 disk->fops = &ida_fops;
475 if (j && !drv->nr_blks) 475 if (j && !drv->nr_blks)
476 continue; 476 continue;
477 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size); 477 blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
478 set_capacity(disk, drv->nr_blks); 478 set_capacity(disk, drv->nr_blks);
479 disk->queue = hba[i]->queue; 479 disk->queue = hba[i]->queue;
480 disk->private_data = drv; 480 disk->private_data = drv;
@@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
903 goto startio; 903 goto startio;
904 904
905queue_next: 905queue_next:
906 creq = elv_next_request(q); 906 creq = blk_peek_request(q);
907 if (!creq) 907 if (!creq)
908 goto startio; 908 goto startio;
909 909
@@ -912,17 +912,18 @@ queue_next:
912 if ((c = cmd_alloc(h,1)) == NULL) 912 if ((c = cmd_alloc(h,1)) == NULL)
913 goto startio; 913 goto startio;
914 914
915 blkdev_dequeue_request(creq); 915 blk_start_request(creq);
916 916
917 c->ctlr = h->ctlr; 917 c->ctlr = h->ctlr;
918 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv; 918 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
919 c->hdr.size = sizeof(rblk_t) >> 2; 919 c->hdr.size = sizeof(rblk_t) >> 2;
920 c->size += sizeof(rblk_t); 920 c->size += sizeof(rblk_t);
921 921
922 c->req.hdr.blk = creq->sector; 922 c->req.hdr.blk = blk_rq_pos(creq);
923 c->rq = creq; 923 c->rq = creq;
924DBGPX( 924DBGPX(
925 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors); 925 printk("sector=%d, nr_sectors=%u\n",
926 blk_rq_pos(creq), blk_rq_sectors(creq));
926); 927);
927 sg_init_table(tmp_sg, SG_MAX); 928 sg_init_table(tmp_sg, SG_MAX);
928 seg = blk_rq_map_sg(q, creq, tmp_sg); 929 seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
940 tmp_sg[i].offset, 941 tmp_sg[i].offset,
941 tmp_sg[i].length, dir); 942 tmp_sg[i].length, dir);
942 } 943 }
943DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); ); 944DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
944 c->req.hdr.sg_cnt = seg; 945 c->req.hdr.sg_cnt = seg;
945 c->req.hdr.blk_cnt = creq->nr_sectors; 946 c->req.hdr.blk_cnt = blk_rq_sectors(creq);
946 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE; 947 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
947 c->type = CMD_RWREQ; 948 c->type = CMD_RWREQ;
948 949
@@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
1024 cmd->req.sg[i].size, ddir); 1025 cmd->req.sg[i].size, ddir);
1025 1026
1026 DBGPX(printk("Done with %p\n", rq);); 1027 DBGPX(printk("Done with %p\n", rq););
1027 if (__blk_end_request(rq, error, blk_rq_bytes(rq))) 1028 __blk_end_request_all(rq, error);
1028 BUG();
1029} 1029}
1030 1030
1031/* 1031/*
@@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host)
1546 drv_info_t *drv = &host->drv[i]; 1546 drv_info_t *drv = &host->drv[i];
1547 if (i && !drv->nr_blks) 1547 if (i && !drv->nr_blks)
1548 continue; 1548 continue;
1549 blk_queue_hardsect_size(host->queue, drv->blk_size); 1549 blk_queue_logical_block_size(host->queue, drv->blk_size);
1550 set_capacity(disk, drv->nr_blks); 1550 set_capacity(disk, drv->nr_blks);
1551 disk->queue = host->queue; 1551 disk->queue = host->queue;
1552 disk->private_data = drv; 1552 disk->private_data = drv;
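The do_ida_request() changes above follow the pattern used throughout this series: blk_peek_request()/blk_start_request() replace elv_next_request()/blkdev_dequeue_request(), request geometry is read through blk_rq_pos() and blk_rq_sectors() instead of the raw fields, and completion collapses into __blk_end_request_all(). A condensed sketch of that loop shape, with the controller-specific command setup folded into a hypothetical ida_submit() helper:

static void do_ida_request_sketch(struct request_queue *q)
{
        struct request *rq;
        sector_t start;
        unsigned int nsect;

        while ((rq = blk_peek_request(q)) != NULL) {
                blk_start_request(rq);            /* was blkdev_dequeue_request() */

                start = blk_rq_pos(rq);           /* was rq->sector */
                nsect = blk_rq_sectors(rq);       /* was rq->nr_sectors */

                if (ida_submit(q->queuedata, rq, start, nsect) < 0)
                        __blk_end_request_all(rq, -EIO);
        }
}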
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1300df6f1642..862b40c90181 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
931 del_timer(&fd_timeout); 931 del_timer(&fd_timeout);
932 cont = NULL; 932 cont = NULL;
933 clear_bit(0, &fdc_busy); 933 clear_bit(0, &fdc_busy);
934 if (elv_next_request(floppy_queue)) 934 if (current_req || blk_peek_request(floppy_queue))
935 do_fd_request(floppy_queue); 935 do_fd_request(floppy_queue);
936 spin_unlock_irqrestore(&floppy_lock, flags); 936 spin_unlock_irqrestore(&floppy_lock, flags);
937 wake_up(&fdc_wait); 937 wake_up(&fdc_wait);
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
2303 2303
2304 /* current_count_sectors can be zero if transfer failed */ 2304 /* current_count_sectors can be zero if transfer failed */
2305 if (error) 2305 if (error)
2306 nr_sectors = req->current_nr_sectors; 2306 nr_sectors = blk_rq_cur_sectors(req);
2307 if (__blk_end_request(req, error, nr_sectors << 9)) 2307 if (__blk_end_request(req, error, nr_sectors << 9))
2308 return; 2308 return;
2309 2309
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
2332 if (uptodate) { 2332 if (uptodate) {
2333 /* maintain values for invalidation on geometry 2333 /* maintain values for invalidation on geometry
2334 * change */ 2334 * change */
2335 block = current_count_sectors + req->sector; 2335 block = current_count_sectors + blk_rq_pos(req);
2336 INFBOUND(DRS->maxblock, block); 2336 INFBOUND(DRS->maxblock, block);
2337 if (block > _floppy->sect) 2337 if (block > _floppy->sect)
2338 DRS->maxtrack = 1; 2338 DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
2346 /* record write error information */ 2346 /* record write error information */
2347 DRWE->write_errors++; 2347 DRWE->write_errors++;
2348 if (DRWE->write_errors == 1) { 2348 if (DRWE->write_errors == 1) {
2349 DRWE->first_error_sector = req->sector; 2349 DRWE->first_error_sector = blk_rq_pos(req);
2350 DRWE->first_error_generation = DRS->generation; 2350 DRWE->first_error_generation = DRS->generation;
2351 } 2351 }
2352 DRWE->last_error_sector = req->sector; 2352 DRWE->last_error_sector = blk_rq_pos(req);
2353 DRWE->last_error_generation = DRS->generation; 2353 DRWE->last_error_generation = DRS->generation;
2354 } 2354 }
2355 spin_lock_irqsave(q->queue_lock, flags); 2355 spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2503 2503
2504 max_sector = transfer_size(ssize, 2504 max_sector = transfer_size(ssize,
2505 min(max_sector, max_sector_2), 2505 min(max_sector, max_sector_2),
2506 current_req->nr_sectors); 2506 blk_rq_sectors(current_req));
2507 2507
2508 if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE && 2508 if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
2509 buffer_max > fsector_t + current_req->nr_sectors) 2509 buffer_max > fsector_t + blk_rq_sectors(current_req))
2510 current_count_sectors = min_t(int, buffer_max - fsector_t, 2510 current_count_sectors = min_t(int, buffer_max - fsector_t,
2511 current_req->nr_sectors); 2511 blk_rq_sectors(current_req));
2512 2512
2513 remaining = current_count_sectors << 9; 2513 remaining = current_count_sectors << 9;
2514#ifdef FLOPPY_SANITY_CHECK 2514#ifdef FLOPPY_SANITY_CHECK
2515 if ((remaining >> 9) > current_req->nr_sectors && 2515 if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
2516 CT(COMMAND) == FD_WRITE) {
2517 DPRINT("in copy buffer\n"); 2516 DPRINT("in copy buffer\n");
2518 printk("current_count_sectors=%ld\n", current_count_sectors); 2517 printk("current_count_sectors=%ld\n", current_count_sectors);
2519 printk("remaining=%d\n", remaining >> 9); 2518 printk("remaining=%d\n", remaining >> 9);
2520 printk("current_req->nr_sectors=%ld\n", 2519 printk("current_req->nr_sectors=%u\n",
2521 current_req->nr_sectors); 2520 blk_rq_sectors(current_req));
2522 printk("current_req->current_nr_sectors=%u\n", 2521 printk("current_req->current_nr_sectors=%u\n",
2523 current_req->current_nr_sectors); 2522 blk_rq_cur_sectors(current_req));
2524 printk("max_sector=%d\n", max_sector); 2523 printk("max_sector=%d\n", max_sector);
2525 printk("ssize=%d\n", ssize); 2524 printk("ssize=%d\n", ssize);
2526 } 2525 }
@@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2530 2529
2531 dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9); 2530 dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
2532 2531
2533 size = current_req->current_nr_sectors << 9; 2532 size = blk_rq_cur_bytes(current_req);
2534 2533
2535 rq_for_each_segment(bv, current_req, iter) { 2534 rq_for_each_segment(bv, current_req, iter) {
2536 if (!remaining) 2535 if (!remaining)
@@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
2648 2647
2649 max_sector = _floppy->sect * _floppy->head; 2648 max_sector = _floppy->sect * _floppy->head;
2650 2649
2651 TRACK = (int)current_req->sector / max_sector; 2650 TRACK = (int)blk_rq_pos(current_req) / max_sector;
2652 fsector_t = (int)current_req->sector % max_sector; 2651 fsector_t = (int)blk_rq_pos(current_req) % max_sector;
2653 if (_floppy->track && TRACK >= _floppy->track) { 2652 if (_floppy->track && TRACK >= _floppy->track) {
2654 if (current_req->current_nr_sectors & 1) { 2653 if (blk_rq_cur_sectors(current_req) & 1) {
2655 current_count_sectors = 1; 2654 current_count_sectors = 1;
2656 return 1; 2655 return 1;
2657 } else 2656 } else
@@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
2669 if (fsector_t >= max_sector) { 2668 if (fsector_t >= max_sector) {
2670 current_count_sectors = 2669 current_count_sectors =
2671 min_t(int, _floppy->sect - fsector_t, 2670 min_t(int, _floppy->sect - fsector_t,
2672 current_req->nr_sectors); 2671 blk_rq_sectors(current_req));
2673 return 1; 2672 return 1;
2674 } 2673 }
2675 SIZECODE = 2; 2674 SIZECODE = 2;
@@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
2720 2719
2721 in_sector_offset = (fsector_t % _floppy->sect) % ssize; 2720 in_sector_offset = (fsector_t % _floppy->sect) % ssize;
2722 aligned_sector_t = fsector_t - in_sector_offset; 2721 aligned_sector_t = fsector_t - in_sector_offset;
2723 max_size = current_req->nr_sectors; 2722 max_size = blk_rq_sectors(current_req);
2724 if ((raw_cmd->track == buffer_track) && 2723 if ((raw_cmd->track == buffer_track) &&
2725 (current_drive == buffer_drive) && 2724 (current_drive == buffer_drive) &&
2726 (fsector_t >= buffer_min) && (fsector_t < buffer_max)) { 2725 (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
2729 copy_buffer(1, max_sector, buffer_max); 2728 copy_buffer(1, max_sector, buffer_max);
2730 return 1; 2729 return 1;
2731 } 2730 }
2732 } else if (in_sector_offset || current_req->nr_sectors < ssize) { 2731 } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
2733 if (CT(COMMAND) == FD_WRITE) { 2732 if (CT(COMMAND) == FD_WRITE) {
2734 if (fsector_t + current_req->nr_sectors > ssize && 2733 if (fsector_t + blk_rq_sectors(current_req) > ssize &&
2735 fsector_t + current_req->nr_sectors < ssize + ssize) 2734 fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
2736 max_size = ssize + ssize; 2735 max_size = ssize + ssize;
2737 else 2736 else
2738 max_size = ssize; 2737 max_size = ssize;
@@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
2776 (indirect * 2 > direct * 3 && 2775 (indirect * 2 > direct * 3 &&
2777 *errors < DP->max_errors.read_track && ((!probing 2776 *errors < DP->max_errors.read_track && ((!probing
2778 || (DP->read_track & (1 << DRS->probed_format)))))) { 2777 || (DP->read_track & (1 << DRS->probed_format)))))) {
2779 max_size = current_req->nr_sectors; 2778 max_size = blk_rq_sectors(current_req);
2780 } else { 2779 } else {
2781 raw_cmd->kernel_data = current_req->buffer; 2780 raw_cmd->kernel_data = current_req->buffer;
2782 raw_cmd->length = current_count_sectors << 9; 2781 raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
2801 fsector_t > buffer_max || 2800 fsector_t > buffer_max ||
2802 fsector_t < buffer_min || 2801 fsector_t < buffer_min ||
2803 ((CT(COMMAND) == FD_READ || 2802 ((CT(COMMAND) == FD_READ ||
2804 (!in_sector_offset && current_req->nr_sectors >= ssize)) && 2803 (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
2805 max_sector > 2 * max_buffer_sectors + buffer_min && 2804 max_sector > 2 * max_buffer_sectors + buffer_min &&
2806 max_size + fsector_t > 2 * max_buffer_sectors + buffer_min) 2805 max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
2807 /* not enough space */ 2806 /* not enough space */
@@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
2879 printk("write\n"); 2878 printk("write\n");
2880 return 0; 2879 return 0;
2881 } 2880 }
2882 } else if (raw_cmd->length > current_req->nr_sectors << 9 || 2881 } else if (raw_cmd->length > blk_rq_bytes(current_req) ||
2883 current_count_sectors > current_req->nr_sectors) { 2882 current_count_sectors > blk_rq_sectors(current_req)) {
2884 DPRINT("buffer overrun in direct transfer\n"); 2883 DPRINT("buffer overrun in direct transfer\n");
2885 return 0; 2884 return 0;
2886 } else if (raw_cmd->length < current_count_sectors << 9) { 2885 } else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
2913 struct request *req; 2912 struct request *req;
2914 2913
2915 spin_lock_irq(floppy_queue->queue_lock); 2914 spin_lock_irq(floppy_queue->queue_lock);
2916 req = elv_next_request(floppy_queue); 2915 req = blk_fetch_request(floppy_queue);
2917 spin_unlock_irq(floppy_queue->queue_lock); 2916 spin_unlock_irq(floppy_queue->queue_lock);
2918 if (!req) { 2917 if (!req) {
2919 do_floppy = NULL; 2918 do_floppy = NULL;
@@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
2990 if (usage_count == 0) { 2989 if (usage_count == 0) {
2991 printk("warning: usage count=0, current_req=%p exiting\n", 2990 printk("warning: usage count=0, current_req=%p exiting\n",
2992 current_req); 2991 current_req);
2993 printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector, 2992 printk("sect=%ld type=%x flags=%x\n",
2994 current_req->cmd_type, current_req->cmd_flags); 2993 (long)blk_rq_pos(current_req), current_req->cmd_type,
2994 current_req->cmd_flags);
2995 return; 2995 return;
2996 } 2996 }
2997 if (test_bit(0, &fdc_busy)) { 2997 if (test_bit(0, &fdc_busy)) {
@@ -4148,6 +4148,24 @@ static void floppy_device_release(struct device *dev)
4148{ 4148{
4149} 4149}
4150 4150
4151static int floppy_resume(struct platform_device *dev)
4152{
4153 int fdc;
4154
4155 for (fdc = 0; fdc < N_FDC; fdc++)
4156 if (FDCS->address != -1)
4157 user_reset_fdc(-1, FD_RESET_ALWAYS, 0);
4158
4159 return 0;
4160}
4161
4162static struct platform_driver floppy_driver = {
4163 .resume = floppy_resume,
4164 .driver = {
4165 .name = "floppy",
4166 },
4167};
4168
4151static struct platform_device floppy_device[N_DRIVE]; 4169static struct platform_device floppy_device[N_DRIVE];
4152 4170
4153static struct kobject *floppy_find(dev_t dev, int *part, void *data) 4171static struct kobject *floppy_find(dev_t dev, int *part, void *data)
@@ -4196,10 +4214,14 @@ static int __init floppy_init(void)
4196 if (err) 4214 if (err)
4197 goto out_put_disk; 4215 goto out_put_disk;
4198 4216
4217 err = platform_driver_register(&floppy_driver);
4218 if (err)
4219 goto out_unreg_blkdev;
4220
4199 floppy_queue = blk_init_queue(do_fd_request, &floppy_lock); 4221 floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
4200 if (!floppy_queue) { 4222 if (!floppy_queue) {
4201 err = -ENOMEM; 4223 err = -ENOMEM;
4202 goto out_unreg_blkdev; 4224 goto out_unreg_driver;
4203 } 4225 }
4204 blk_queue_max_sectors(floppy_queue, 64); 4226 blk_queue_max_sectors(floppy_queue, 64);
4205 4227
@@ -4346,6 +4368,8 @@ out_flush_work:
4346out_unreg_region: 4368out_unreg_region:
4347 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4369 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
4348 blk_cleanup_queue(floppy_queue); 4370 blk_cleanup_queue(floppy_queue);
4371out_unreg_driver:
4372 platform_driver_unregister(&floppy_driver);
4349out_unreg_blkdev: 4373out_unreg_blkdev:
4350 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4374 unregister_blkdev(FLOPPY_MAJOR, "fd");
4351out_put_disk: 4375out_put_disk:
@@ -4566,6 +4590,7 @@ static void __exit floppy_module_exit(void)
4566 4590
4567 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4591 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
4568 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4592 unregister_blkdev(FLOPPY_MAJOR, "fd");
4593 platform_driver_unregister(&floppy_driver);
4569 4594
4570 for (drive = 0; drive < N_DRIVE; drive++) { 4595 for (drive = 0; drive < N_DRIVE; drive++) {
4571 del_timer_sync(&motor_off_timer[drive]); 4596 del_timer_sync(&motor_off_timer[drive]);
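The floppy.c hunks above are almost entirely mechanical substitutions of request-field reads for the new accessors. The mapping below is a reader's aid summarizing the conversions used in this series, not part of the patch:

/*
 *   req->sector                   ->  blk_rq_pos(req)          start sector
 *   req->nr_sectors               ->  blk_rq_sectors(req)      sectors left in the request
 *   req->current_nr_sectors       ->  blk_rq_cur_sectors(req)  sectors left in the current segment
 *   req->nr_sectors << 9          ->  blk_rq_bytes(req)        bytes left in the request
 *   req->current_nr_sectors << 9  ->  blk_rq_cur_bytes(req)    bytes left in the current segment
 *
 * The accessors are read-only views; __blk_end_request() and friends now
 * advance the request, so the manual sector++ / nr_sectors-- bookkeeping
 * seen in the removed lines is gone.
 */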
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index baaa9e486e50..f65b3f369eb0 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -98,10 +98,9 @@
98 98
99static DEFINE_SPINLOCK(hd_lock); 99static DEFINE_SPINLOCK(hd_lock);
100static struct request_queue *hd_queue; 100static struct request_queue *hd_queue;
101static struct request *hd_req;
101 102
102#define MAJOR_NR HD_MAJOR 103#define MAJOR_NR HD_MAJOR
103#define QUEUE (hd_queue)
104#define CURRENT elv_next_request(hd_queue)
105 104
106#define TIMEOUT_VALUE (6*HZ) 105#define TIMEOUT_VALUE (6*HZ)
107#define HD_DELAY 0 106#define HD_DELAY 0
@@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
195 NR_HD = hdind+1; 194 NR_HD = hdind+1;
196} 195}
197 196
197static bool hd_end_request(int err, unsigned int bytes)
198{
199 if (__blk_end_request(hd_req, err, bytes))
200 return true;
201 hd_req = NULL;
202 return false;
203}
204
205static bool hd_end_request_cur(int err)
206{
207 return hd_end_request(err, blk_rq_cur_bytes(hd_req));
208}
209
198static void dump_status(const char *msg, unsigned int stat) 210static void dump_status(const char *msg, unsigned int stat)
199{ 211{
200 char *name = "hd?"; 212 char *name = "hd?";
201 if (CURRENT) 213 if (hd_req)
202 name = CURRENT->rq_disk->disk_name; 214 name = hd_req->rq_disk->disk_name;
203 215
204#ifdef VERBOSE_ERRORS 216#ifdef VERBOSE_ERRORS
205 printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff); 217 printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
227 if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) { 239 if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
228 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL), 240 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
229 inb(HD_CURRENT) & 0xf, inb(HD_SECTOR)); 241 inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
230 if (CURRENT) 242 if (hd_req)
231 printk(", sector=%ld", CURRENT->sector); 243 printk(", sector=%ld", blk_rq_pos(hd_req));
232 } 244 }
233 printk("\n"); 245 printk("\n");
234 } 246 }
@@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
406 */ 418 */
407static void bad_rw_intr(void) 419static void bad_rw_intr(void)
408{ 420{
409 struct request *req = CURRENT; 421 struct request *req = hd_req;
422
410 if (req != NULL) { 423 if (req != NULL) {
411 struct hd_i_struct *disk = req->rq_disk->private_data; 424 struct hd_i_struct *disk = req->rq_disk->private_data;
412 if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { 425 if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
413 end_request(req, 0); 426 hd_end_request_cur(-EIO);
414 disk->special_op = disk->recalibrate = 1; 427 disk->special_op = disk->recalibrate = 1;
415 } else if (req->errors % RESET_FREQ == 0) 428 } else if (req->errors % RESET_FREQ == 0)
416 reset = 1; 429 reset = 1;
@@ -452,37 +465,30 @@ static void read_intr(void)
452 bad_rw_intr(); 465 bad_rw_intr();
453 hd_request(); 466 hd_request();
454 return; 467 return;
468
455ok_to_read: 469ok_to_read:
456 req = CURRENT; 470 req = hd_req;
457 insw(HD_DATA, req->buffer, 256); 471 insw(HD_DATA, req->buffer, 256);
458 req->sector++;
459 req->buffer += 512;
460 req->errors = 0;
461 i = --req->nr_sectors;
462 --req->current_nr_sectors;
463#ifdef DEBUG 472#ifdef DEBUG
464 printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n", 473 printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
465 req->rq_disk->disk_name, req->sector, req->nr_sectors, 474 req->rq_disk->disk_name, blk_rq_pos(req) + 1,
466 req->buffer+512); 475 blk_rq_sectors(req) - 1, req->buffer+512);
467#endif 476#endif
468 if (req->current_nr_sectors <= 0) 477 if (hd_end_request(0, 512)) {
469 end_request(req, 1);
470 if (i > 0) {
471 SET_HANDLER(&read_intr); 478 SET_HANDLER(&read_intr);
472 return; 479 return;
473 } 480 }
481
474 (void) inb_p(HD_STATUS); 482 (void) inb_p(HD_STATUS);
475#if (HD_DELAY > 0) 483#if (HD_DELAY > 0)
476 last_req = read_timer(); 484 last_req = read_timer();
477#endif 485#endif
478 if (elv_next_request(QUEUE)) 486 hd_request();
479 hd_request();
480 return;
481} 487}
482 488
483static void write_intr(void) 489static void write_intr(void)
484{ 490{
485 struct request *req = CURRENT; 491 struct request *req = hd_req;
486 int i; 492 int i;
487 int retries = 100000; 493 int retries = 100000;
488 494
@@ -492,30 +498,25 @@ static void write_intr(void)
492 continue; 498 continue;
493 if (!OK_STATUS(i)) 499 if (!OK_STATUS(i))
494 break; 500 break;
495 if ((req->nr_sectors <= 1) || (i & DRQ_STAT)) 501 if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
496 goto ok_to_write; 502 goto ok_to_write;
497 } while (--retries > 0); 503 } while (--retries > 0);
498 dump_status("write_intr", i); 504 dump_status("write_intr", i);
499 bad_rw_intr(); 505 bad_rw_intr();
500 hd_request(); 506 hd_request();
501 return; 507 return;
508
502ok_to_write: 509ok_to_write:
503 req->sector++; 510 if (hd_end_request(0, 512)) {
504 i = --req->nr_sectors;
505 --req->current_nr_sectors;
506 req->buffer += 512;
507 if (!i || (req->bio && req->current_nr_sectors <= 0))
508 end_request(req, 1);
509 if (i > 0) {
510 SET_HANDLER(&write_intr); 511 SET_HANDLER(&write_intr);
511 outsw(HD_DATA, req->buffer, 256); 512 outsw(HD_DATA, req->buffer, 256);
512 } else { 513 return;
514 }
515
513#if (HD_DELAY > 0) 516#if (HD_DELAY > 0)
514 last_req = read_timer(); 517 last_req = read_timer();
515#endif 518#endif
516 hd_request(); 519 hd_request();
517 }
518 return;
519} 520}
520 521
521static void recal_intr(void) 522static void recal_intr(void)
@@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
537 538
538 do_hd = NULL; 539 do_hd = NULL;
539 540
540 if (!CURRENT) 541 if (!hd_req)
541 return; 542 return;
542 543
543 spin_lock_irq(hd_queue->queue_lock); 544 spin_lock_irq(hd_queue->queue_lock);
544 reset = 1; 545 reset = 1;
545 name = CURRENT->rq_disk->disk_name; 546 name = hd_req->rq_disk->disk_name;
546 printk("%s: timeout\n", name); 547 printk("%s: timeout\n", name);
547 if (++CURRENT->errors >= MAX_ERRORS) { 548 if (++hd_req->errors >= MAX_ERRORS) {
548#ifdef DEBUG 549#ifdef DEBUG
549 printk("%s: too many errors\n", name); 550 printk("%s: too many errors\n", name);
550#endif 551#endif
551 end_request(CURRENT, 0); 552 hd_end_request_cur(-EIO);
552 } 553 }
553 hd_request(); 554 hd_request();
554 spin_unlock_irq(hd_queue->queue_lock); 555 spin_unlock_irq(hd_queue->queue_lock);
@@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
563 } 564 }
564 if (disk->head > 16) { 565 if (disk->head > 16) {
565 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); 566 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
566 end_request(req, 0); 567 hd_end_request_cur(-EIO);
567 } 568 }
568 disk->special_op = 0; 569 disk->special_op = 0;
569 return 1; 570 return 1;
@@ -590,24 +591,27 @@ static void hd_request(void)
590repeat: 591repeat:
591 del_timer(&device_timer); 592 del_timer(&device_timer);
592 593
593 req = CURRENT; 594 if (!hd_req) {
594 if (!req) { 595 hd_req = blk_fetch_request(hd_queue);
595 do_hd = NULL; 596 if (!hd_req) {
596 return; 597 do_hd = NULL;
598 return;
599 }
597 } 600 }
601 req = hd_req;
598 602
599 if (reset) { 603 if (reset) {
600 reset_hd(); 604 reset_hd();
601 return; 605 return;
602 } 606 }
603 disk = req->rq_disk->private_data; 607 disk = req->rq_disk->private_data;
604 block = req->sector; 608 block = blk_rq_pos(req);
605 nsect = req->nr_sectors; 609 nsect = blk_rq_sectors(req);
606 if (block >= get_capacity(req->rq_disk) || 610 if (block >= get_capacity(req->rq_disk) ||
607 ((block+nsect) > get_capacity(req->rq_disk))) { 611 ((block+nsect) > get_capacity(req->rq_disk))) {
608 printk("%s: bad access: block=%d, count=%d\n", 612 printk("%s: bad access: block=%d, count=%d\n",
609 req->rq_disk->disk_name, block, nsect); 613 req->rq_disk->disk_name, block, nsect);
610 end_request(req, 0); 614 hd_end_request_cur(-EIO);
611 goto repeat; 615 goto repeat;
612 } 616 }
613 617
@@ -647,7 +651,7 @@ repeat:
647 break; 651 break;
648 default: 652 default:
649 printk("unknown hd-command\n"); 653 printk("unknown hd-command\n");
650 end_request(req, 0); 654 hd_end_request_cur(-EIO);
651 break; 655 break;
652 } 656 }
653 } 657 }
@@ -720,7 +724,7 @@ static int __init hd_init(void)
720 blk_queue_max_sectors(hd_queue, 255); 724 blk_queue_max_sectors(hd_queue, 255);
721 init_timer(&device_timer); 725 init_timer(&device_timer);
722 device_timer.function = hd_times_out; 726 device_timer.function = hd_times_out;
723 blk_queue_hardsect_size(hd_queue, 512); 727 blk_queue_logical_block_size(hd_queue, 512);
724 728
725 if (!NR_HD) { 729 if (!NR_HD) {
726 /* 730 /*
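hd.c above drops the CURRENT/QUEUE macros and instead keeps the single in-flight request in hd_req, completing it in 512-byte steps through the hd_end_request()/hd_end_request_cur() wrappers and refetching work with blk_fetch_request(). A stripped-down sketch of that ownership pattern (hd_next() is a condensed, illustrative stand-in for hd_request()):

static struct request *hd_req;           /* the one request being serviced */

static bool hd_end_request(int err, unsigned int bytes)
{
        if (__blk_end_request(hd_req, err, bytes))
                return true;             /* part of the request still pending */
        hd_req = NULL;                   /* fully completed, release ownership */
        return false;
}

static void hd_next(struct request_queue *q)
{
        if (!hd_req)
                hd_req = blk_fetch_request(q);
        if (!hd_req)
                return;                  /* queue empty, nothing to program */
        /* ... issue the transfer starting at blk_rq_pos(hd_req) ... */
}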
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ddae80825899..801f4ab83302 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,11 +511,7 @@ out:
511 */ 511 */
512static void loop_add_bio(struct loop_device *lo, struct bio *bio) 512static void loop_add_bio(struct loop_device *lo, struct bio *bio)
513{ 513{
514 if (lo->lo_biotail) { 514 bio_list_add(&lo->lo_bio_list, bio);
515 lo->lo_biotail->bi_next = bio;
516 lo->lo_biotail = bio;
517 } else
518 lo->lo_bio = lo->lo_biotail = bio;
519} 515}
520 516
521/* 517/*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
523 */ 519 */
524static struct bio *loop_get_bio(struct loop_device *lo) 520static struct bio *loop_get_bio(struct loop_device *lo)
525{ 521{
526 struct bio *bio; 522 return bio_list_pop(&lo->lo_bio_list);
527
528 if ((bio = lo->lo_bio)) {
529 if (bio == lo->lo_biotail)
530 lo->lo_biotail = NULL;
531 lo->lo_bio = bio->bi_next;
532 bio->bi_next = NULL;
533 }
534
535 return bio;
536} 523}
537 524
538static int loop_make_request(struct request_queue *q, struct bio *old_bio) 525static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
609 596
610 set_user_nice(current, -20); 597 set_user_nice(current, -20);
611 598
612 while (!kthread_should_stop() || lo->lo_bio) { 599 while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
613 600
614 wait_event_interruptible(lo->lo_event, 601 wait_event_interruptible(lo->lo_event,
615 lo->lo_bio || kthread_should_stop()); 602 !bio_list_empty(&lo->lo_bio_list) ||
603 kthread_should_stop());
616 604
617 if (!lo->lo_bio) 605 if (bio_list_empty(&lo->lo_bio_list))
618 continue; 606 continue;
619 spin_lock_irq(&lo->lo_lock); 607 spin_lock_irq(&lo->lo_lock);
620 bio = loop_get_bio(lo); 608 bio = loop_get_bio(lo);
@@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
721 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 709 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
722 goto out_putf; 710 goto out_putf;
723 711
724 /* new backing store needs to support loop (eg splice_read) */
725 if (!inode->i_fop->splice_read)
726 goto out_putf;
727
728 /* size of the new backing store needs to be the same */ 712 /* size of the new backing store needs to be the same */
729 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 713 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
730 goto out_putf; 714 goto out_putf;
@@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
800 error = -EINVAL; 784 error = -EINVAL;
801 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { 785 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
802 const struct address_space_operations *aops = mapping->a_ops; 786 const struct address_space_operations *aops = mapping->a_ops;
803 /* 787
804 * If we can't read - sorry. If we only can't write - well,
805 * it's going to be read-only.
806 */
807 if (!file->f_op->splice_read)
808 goto out_putf;
809 if (aops->write_begin) 788 if (aops->write_begin)
810 lo_flags |= LO_FLAGS_USE_AOPS; 789 lo_flags |= LO_FLAGS_USE_AOPS;
811 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) 790 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
841 lo->old_gfp_mask = mapping_gfp_mask(mapping); 820 lo->old_gfp_mask = mapping_gfp_mask(mapping);
842 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 821 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
843 822
844 lo->lo_bio = lo->lo_biotail = NULL; 823 bio_list_init(&lo->lo_bio_list);
845 824
846 /* 825 /*
847 * set queue make_request_fn, and add limits based on lower level 826 * set queue make_request_fn, and add limits based on lower level
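loop.c above replaces the hand-rolled lo_bio/lo_biotail chain with a struct bio_list and the generic helpers. A self-contained sketch of the same four operations on an illustrative container (struct pending_bios is not part of the patch):

#include <linux/bio.h>

struct pending_bios {
        struct bio_list list;
};

static void pending_init(struct pending_bios *p)
{
        bio_list_init(&p->list);                 /* start empty */
}

static void pending_add(struct pending_bios *p, struct bio *bio)
{
        bio_list_add(&p->list, bio);             /* append at the tail */
}

static struct bio *pending_pop(struct pending_bios *p)
{
        return bio_list_pop(&p->list);           /* NULL when empty */
}

static int pending_empty(struct pending_bios *p)
{
        return bio_list_empty(&p->list);
}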
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f3898353d0a8..60de5a01e71e 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -17,71 +17,220 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/blkdev.h> 18#include <linux/blkdev.h>
19#include <linux/hdreg.h> 19#include <linux/hdreg.h>
20#include <linux/libata.h> 20#include <linux/ata.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/mg_disk.h>
26 25
27#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1) 26#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
28 27
28/* name for block device */
29#define MG_DISK_NAME "mgd"
30/* name for platform device */
31#define MG_DEV_NAME "mg_disk"
32
33#define MG_DISK_MAJ 0
34#define MG_DISK_MAX_PART 16
35#define MG_SECTOR_SIZE 512
36#define MG_MAX_SECTS 256
37
38/* Register offsets */
39#define MG_BUFF_OFFSET 0x8000
40#define MG_STORAGE_BUFFER_SIZE 0x200
41#define MG_REG_OFFSET 0xC000
42#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
43#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
44#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
45#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
46#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
47#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
48#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
49#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
50#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
51#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
52#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
53
54/* handy status */
55#define MG_STAT_READY (ATA_DRDY | ATA_DSC)
56#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
57 ATA_ERR))) == MG_STAT_READY)
58
59/* error code for others */
60#define MG_ERR_NONE 0
61#define MG_ERR_TIMEOUT 0x100
62#define MG_ERR_INIT_STAT 0x101
63#define MG_ERR_TRANSLATION 0x102
64#define MG_ERR_CTRL_RST 0x103
65#define MG_ERR_INV_STAT 0x104
66#define MG_ERR_RSTOUT 0x105
67
68#define MG_MAX_ERRORS 6 /* Max read/write errors */
69
70/* command */
71#define MG_CMD_RD 0x20
72#define MG_CMD_WR 0x30
73#define MG_CMD_SLEEP 0x99
74#define MG_CMD_WAKEUP 0xC3
75#define MG_CMD_ID 0xEC
76#define MG_CMD_WR_CONF 0x3C
77#define MG_CMD_RD_CONF 0x40
78
79/* operation mode */
80#define MG_OP_CASCADE (1 << 0)
81#define MG_OP_CASCADE_SYNC_RD (1 << 1)
82#define MG_OP_CASCADE_SYNC_WR (1 << 2)
83#define MG_OP_INTERLEAVE (1 << 3)
84
85/* synchronous */
86#define MG_BURST_LAT_4 (3 << 4)
87#define MG_BURST_LAT_5 (4 << 4)
88#define MG_BURST_LAT_6 (5 << 4)
89#define MG_BURST_LAT_7 (6 << 4)
90#define MG_BURST_LAT_8 (7 << 4)
91#define MG_BURST_LEN_4 (1 << 1)
92#define MG_BURST_LEN_8 (2 << 1)
93#define MG_BURST_LEN_16 (3 << 1)
94#define MG_BURST_LEN_32 (4 << 1)
95#define MG_BURST_LEN_CONT (0 << 1)
96
97/* timeout value (unit: ms) */
98#define MG_TMAX_CONF_TO_CMD 1
99#define MG_TMAX_WAIT_RD_DRQ 10
100#define MG_TMAX_WAIT_WR_DRQ 500
101#define MG_TMAX_RST_TO_BUSY 10
102#define MG_TMAX_HDRST_TO_RDY 500
103#define MG_TMAX_SWRST_TO_RDY 500
104#define MG_TMAX_RSTOUT 3000
105
106/* device attribution */
107/* use mflash as boot device */
108#define MG_BOOT_DEV (1 << 0)
109/* use mflash as storage device */
110#define MG_STORAGE_DEV (1 << 1)
111/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
112#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
113
114#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
115
116/* names of GPIO resource */
117#define MG_RST_PIN "mg_rst"
118/* except MG_BOOT_DEV, reset-out pin should be assigned */
119#define MG_RSTOUT_PIN "mg_rstout"
120
121/* private driver data */
122struct mg_drv_data {
123 /* disk resource */
124 u32 use_polling;
125
126 /* device attribution */
127 u32 dev_attr;
128
129 /* internally used */
130 struct mg_host *host;
131};
132
133/* main structure for mflash driver */
134struct mg_host {
135 struct device *dev;
136
137 struct request_queue *breq;
138 struct request *req;
139 spinlock_t lock;
140 struct gendisk *gd;
141
142 struct timer_list timer;
143 void (*mg_do_intr) (struct mg_host *);
144
145 u16 id[ATA_ID_WORDS];
146
147 u16 cyls;
148 u16 heads;
149 u16 sectors;
150 u32 n_sectors;
151 u32 nres_sectors;
152
153 void __iomem *dev_base;
154 unsigned int irq;
155 unsigned int rst;
156 unsigned int rstout;
157
158 u32 major;
159 u32 error;
160};
161
162/*
163 * Debugging macro and defines
164 */
165#undef DO_MG_DEBUG
166#ifdef DO_MG_DEBUG
167# define MG_DBG(fmt, args...) \
168 printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
169#else /* CONFIG_MG_DEBUG */
170# define MG_DBG(fmt, args...) do { } while (0)
171#endif /* CONFIG_MG_DEBUG */
172
29static void mg_request(struct request_queue *); 173static void mg_request(struct request_queue *);
30 174
175static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
176{
177 if (__blk_end_request(host->req, err, nr_bytes))
178 return true;
179
180 host->req = NULL;
181 return false;
182}
183
184static bool mg_end_request_cur(struct mg_host *host, int err)
185{
186 return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
187}
188
31static void mg_dump_status(const char *msg, unsigned int stat, 189static void mg_dump_status(const char *msg, unsigned int stat,
32 struct mg_host *host) 190 struct mg_host *host)
33{ 191{
34 char *name = MG_DISK_NAME; 192 char *name = MG_DISK_NAME;
35 struct request *req;
36 193
37 if (host->breq) { 194 if (host->req)
38 req = elv_next_request(host->breq); 195 name = host->req->rq_disk->disk_name;
39 if (req)
40 name = req->rq_disk->disk_name;
41 }
42 196
43 printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff); 197 printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
44 if (stat & MG_REG_STATUS_BIT_BUSY) 198 if (stat & ATA_BUSY)
45 printk("Busy "); 199 printk("Busy ");
46 if (stat & MG_REG_STATUS_BIT_READY) 200 if (stat & ATA_DRDY)
47 printk("DriveReady "); 201 printk("DriveReady ");
48 if (stat & MG_REG_STATUS_BIT_WRITE_FAULT) 202 if (stat & ATA_DF)
49 printk("WriteFault "); 203 printk("WriteFault ");
50 if (stat & MG_REG_STATUS_BIT_SEEK_DONE) 204 if (stat & ATA_DSC)
51 printk("SeekComplete "); 205 printk("SeekComplete ");
52 if (stat & MG_REG_STATUS_BIT_DATA_REQ) 206 if (stat & ATA_DRQ)
53 printk("DataRequest "); 207 printk("DataRequest ");
54 if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR) 208 if (stat & ATA_CORR)
55 printk("CorrectedError "); 209 printk("CorrectedError ");
56 if (stat & MG_REG_STATUS_BIT_ERROR) 210 if (stat & ATA_ERR)
57 printk("Error "); 211 printk("Error ");
58 printk("}\n"); 212 printk("}\n");
59 if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) { 213 if ((stat & ATA_ERR) == 0) {
60 host->error = 0; 214 host->error = 0;
61 } else { 215 } else {
62 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR); 216 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
63 printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg, 217 printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
64 host->error & 0xff); 218 host->error & 0xff);
65 if (host->error & MG_REG_ERR_BBK) 219 if (host->error & ATA_BBK)
66 printk("BadSector "); 220 printk("BadSector ");
67 if (host->error & MG_REG_ERR_UNC) 221 if (host->error & ATA_UNC)
68 printk("UncorrectableError "); 222 printk("UncorrectableError ");
69 if (host->error & MG_REG_ERR_IDNF) 223 if (host->error & ATA_IDNF)
70 printk("SectorIdNotFound "); 224 printk("SectorIdNotFound ");
71 if (host->error & MG_REG_ERR_ABRT) 225 if (host->error & ATA_ABORTED)
72 printk("DriveStatusError "); 226 printk("DriveStatusError ");
73 if (host->error & MG_REG_ERR_AMNF) 227 if (host->error & ATA_AMNF)
74 printk("AddrMarkNotFound "); 228 printk("AddrMarkNotFound ");
75 printk("}"); 229 printk("}");
76 if (host->error & 230 if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
77 (MG_REG_ERR_BBK | MG_REG_ERR_UNC | 231 if (host->req)
78 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) { 232 printk(", sector=%u",
79 if (host->breq) { 233 (unsigned int)blk_rq_pos(host->req));
80 req = elv_next_request(host->breq);
81 if (req)
82 printk(", sector=%u", (u32)req->sector);
83 }
84
85 } 234 }
86 printk("\n"); 235 printk("\n");
87 } 236 }
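The large hunk above folds the former <linux/mg_disk.h> definitions into the driver and switches from the private MG_REG_STATUS_BIT_*/MG_REG_ERR_* names to the standard flags from <linux/ata.h>. A small sketch of a status poll written against those bits (mg_drive_ready() is illustrative; the register layout and MG_READY_OK come from the hunk above):

static bool mg_drive_ready(struct mg_host *host)
{
        u8 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

        if (status & ATA_BUSY)           /* was MG_REG_STATUS_BIT_BUSY */
                return false;
        if (status & ATA_ERR)            /* was MG_REG_STATUS_BIT_ERROR */
                return false;
        return MG_READY_OK(status);      /* ATA_DRDY | ATA_DSC, no fault/error */
}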
@@ -100,12 +249,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
100 249
101 do { 250 do {
102 cur_jiffies = jiffies; 251 cur_jiffies = jiffies;
103 if (status & MG_REG_STATUS_BIT_BUSY) { 252 if (status & ATA_BUSY) {
104 if (expect == MG_REG_STATUS_BIT_BUSY) 253 if (expect == ATA_BUSY)
105 break; 254 break;
106 } else { 255 } else {
107 /* Check the error condition! */ 256 /* Check the error condition! */
108 if (status & MG_REG_STATUS_BIT_ERROR) { 257 if (status & ATA_ERR) {
109 mg_dump_status("mg_wait", status, host); 258 mg_dump_status("mg_wait", status, host);
110 break; 259 break;
111 } 260 }
@@ -114,8 +263,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
114 if (MG_READY_OK(status)) 263 if (MG_READY_OK(status))
115 break; 264 break;
116 265
117 if (expect == MG_REG_STATUS_BIT_DATA_REQ) 266 if (expect == ATA_DRQ)
118 if (status & MG_REG_STATUS_BIT_DATA_REQ) 267 if (status & ATA_DRQ)
119 break; 268 break;
120 } 269 }
121 if (!msec) { 270 if (!msec) {
@@ -173,6 +322,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
173 return IRQ_HANDLED; 322 return IRQ_HANDLED;
174} 323}
175 324
325/* local copy of ata_id_string() */
326static void mg_id_string(const u16 *id, unsigned char *s,
327 unsigned int ofs, unsigned int len)
328{
329 unsigned int c;
330
331 BUG_ON(len & 1);
332
333 while (len > 0) {
334 c = id[ofs] >> 8;
335 *s = c;
336 s++;
337
338 c = id[ofs] & 0xff;
339 *s = c;
340 s++;
341
342 ofs++;
343 len -= 2;
344 }
345}
346
347/* local copy of ata_id_c_string() */
348static void mg_id_c_string(const u16 *id, unsigned char *s,
349 unsigned int ofs, unsigned int len)
350{
351 unsigned char *p;
352
353 mg_id_string(id, s, ofs, len - 1);
354
355 p = s + strnlen(s, len - 1);
356 while (p > s && p[-1] == ' ')
357 p--;
358 *p = '\0';
359}
360
176static int mg_get_disk_id(struct mg_host *host) 361static int mg_get_disk_id(struct mg_host *host)
177{ 362{
178 u32 i; 363 u32 i;
@@ -184,12 +369,10 @@ static int mg_get_disk_id(struct mg_host *host)
184 char serial[ATA_ID_SERNO_LEN + 1]; 369 char serial[ATA_ID_SERNO_LEN + 1];
185 370
186 if (!prv_data->use_polling) 371 if (!prv_data->use_polling)
187 outb(MG_REG_CTRL_INTR_DISABLE, 372 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
188 (unsigned long)host->dev_base +
189 MG_REG_DRV_CTRL);
190 373
191 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); 374 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
192 err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ); 375 err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
193 if (err) 376 if (err)
194 return err; 377 return err;
195 378
@@ -219,9 +402,9 @@ static int mg_get_disk_id(struct mg_host *host)
219 host->n_sectors -= host->nres_sectors; 402 host->n_sectors -= host->nres_sectors;
220 } 403 }
221 404
222 ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); 405 mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
223 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); 406 mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
224 ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); 407 mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
225 printk(KERN_INFO "mg_disk: model: %s\n", model); 408 printk(KERN_INFO "mg_disk: model: %s\n", model);
226 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev); 409 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
227 printk(KERN_INFO "mg_disk: serial: %s\n", serial); 410 printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@@ -229,8 +412,7 @@ static int mg_get_disk_id(struct mg_host *host)
229 host->n_sectors, host->nres_sectors); 412 host->n_sectors, host->nres_sectors);
230 413
231 if (!prv_data->use_polling) 414 if (!prv_data->use_polling)
232 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 415 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
233 MG_REG_DRV_CTRL);
234 416
235 return err; 417 return err;
236} 418}
@@ -244,7 +426,7 @@ static int mg_disk_init(struct mg_host *host)
244 426
245 /* hdd rst low */ 427 /* hdd rst low */
246 gpio_set_value(host->rst, 0); 428 gpio_set_value(host->rst, 0);
247 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 429 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
248 if (err) 430 if (err)
249 return err; 431 return err;
250 432
@@ -255,17 +437,14 @@ static int mg_disk_init(struct mg_host *host)
255 return err; 437 return err;
256 438
257 /* soft reset on */ 439 /* soft reset on */
258 outb(MG_REG_CTRL_RESET | 440 outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
259 (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
260 MG_REG_CTRL_INTR_ENABLE),
261 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 441 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
262 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 442 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
263 if (err) 443 if (err)
264 return err; 444 return err;
265 445
266 /* soft reset off */ 446 /* soft reset off */
267 outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE : 447 outb(prv_data->use_polling ? ATA_NIEN : 0,
268 MG_REG_CTRL_INTR_ENABLE,
269 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 448 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
270 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); 449 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
271 if (err) 450 if (err)
@@ -281,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
281 460
282static void mg_bad_rw_intr(struct mg_host *host) 461static void mg_bad_rw_intr(struct mg_host *host)
283{ 462{
284 struct request *req = elv_next_request(host->breq); 463 if (host->req)
285 if (req != NULL) 464 if (++host->req->errors >= MG_MAX_ERRORS ||
286 if (++req->errors >= MG_MAX_ERRORS || 465 host->error == MG_ERR_TIMEOUT)
287 host->error == MG_ERR_TIMEOUT) 466 mg_end_request_cur(host, -EIO);
288 end_request(req, 0);
289} 467}
290 468
291static unsigned int mg_out(struct mg_host *host, 469static unsigned int mg_out(struct mg_host *host,
@@ -311,7 +489,7 @@ static unsigned int mg_out(struct mg_host *host,
311 MG_REG_CYL_LOW); 489 MG_REG_CYL_LOW);
312 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + 490 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
313 MG_REG_CYL_HIGH); 491 MG_REG_CYL_HIGH);
314 outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE), 492 outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
315 (unsigned long)host->dev_base + MG_REG_DRV_HEAD); 493 (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
316 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND); 494 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
317 return MG_ERR_NONE; 495 return MG_ERR_NONE;
@@ -319,105 +497,77 @@ static unsigned int mg_out(struct mg_host *host,
319 497
320static void mg_read(struct request *req) 498static void mg_read(struct request *req)
321{ 499{
322 u32 remains, j; 500 u32 j;
323 struct mg_host *host = req->rq_disk->private_data; 501 struct mg_host *host = req->rq_disk->private_data;
324 502
325 remains = req->nr_sectors; 503 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
326 504 MG_CMD_RD, NULL) != MG_ERR_NONE)
327 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
328 MG_ERR_NONE)
329 mg_bad_rw_intr(host); 505 mg_bad_rw_intr(host);
330 506
331 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 507 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
332 remains, req->sector, req->buffer); 508 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
509
510 do {
511 u16 *buff = (u16 *)req->buffer;
333 512
334 while (remains) { 513 if (mg_wait(host, ATA_DRQ,
335 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 514 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
336 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
337 mg_bad_rw_intr(host); 515 mg_bad_rw_intr(host);
338 return; 516 return;
339 } 517 }
340 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 518 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
341 *(u16 *)req->buffer = 519 *buff++ = inw((unsigned long)host->dev_base +
342 inw((unsigned long)host->dev_base + 520 MG_BUFF_OFFSET + (j << 1));
343 MG_BUFF_OFFSET + (j << 1));
344 req->buffer += 2;
345 }
346
347 req->sector++;
348 req->errors = 0;
349 remains = --req->nr_sectors;
350 --req->current_nr_sectors;
351
352 if (req->current_nr_sectors <= 0) {
353 MG_DBG("remain : %d sects\n", remains);
354 end_request(req, 1);
355 if (remains > 0)
356 req = elv_next_request(host->breq);
357 }
358 521
359 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + 522 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
360 MG_REG_COMMAND); 523 MG_REG_COMMAND);
361 } 524 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
362} 525}
363 526
364static void mg_write(struct request *req) 527static void mg_write(struct request *req)
365{ 528{
366 u32 remains, j; 529 u32 j;
367 struct mg_host *host = req->rq_disk->private_data; 530 struct mg_host *host = req->rq_disk->private_data;
368 531
369 remains = req->nr_sectors; 532 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
370 533 MG_CMD_WR, NULL) != MG_ERR_NONE) {
371 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
372 MG_ERR_NONE) {
373 mg_bad_rw_intr(host); 534 mg_bad_rw_intr(host);
374 return; 535 return;
375 } 536 }
376 537
377
378 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 538 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
379 remains, req->sector, req->buffer); 539 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
380 while (remains) { 540
381 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 541 do {
382 MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { 542 u16 *buff = (u16 *)req->buffer;
543
544 if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
383 mg_bad_rw_intr(host); 545 mg_bad_rw_intr(host);
384 return; 546 return;
385 } 547 }
386 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 548 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
387 outw(*(u16 *)req->buffer, 549 outw(*buff++, (unsigned long)host->dev_base +
388 (unsigned long)host->dev_base + 550 MG_BUFF_OFFSET + (j << 1));
389 MG_BUFF_OFFSET + (j << 1));
390 req->buffer += 2;
391 }
392 req->sector++;
393 remains = --req->nr_sectors;
394 --req->current_nr_sectors;
395
396 if (req->current_nr_sectors <= 0) {
397 MG_DBG("remain : %d sects\n", remains);
398 end_request(req, 1);
399 if (remains > 0)
400 req = elv_next_request(host->breq);
401 }
402 551
403 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 552 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
404 MG_REG_COMMAND); 553 MG_REG_COMMAND);
405 } 554 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
406} 555}
407 556
408static void mg_read_intr(struct mg_host *host) 557static void mg_read_intr(struct mg_host *host)
409{ 558{
559 struct request *req = host->req;
410 u32 i; 560 u32 i;
411 struct request *req; 561 u16 *buff;
412 562
413 /* check status */ 563 /* check status */
414 do { 564 do {
415 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 565 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
416 if (i & MG_REG_STATUS_BIT_BUSY) 566 if (i & ATA_BUSY)
417 break; 567 break;
418 if (!MG_READY_OK(i)) 568 if (!MG_READY_OK(i))
419 break; 569 break;
420 if (i & MG_REG_STATUS_BIT_DATA_REQ) 570 if (i & ATA_DRQ)
421 goto ok_to_read; 571 goto ok_to_read;
422 } while (0); 572 } while (0);
423 mg_dump_status("mg_read_intr", i, host); 573 mg_dump_status("mg_read_intr", i, host);
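In the rewritten mg_read()/mg_write() above, the manual sector++/nr_sectors-- bookkeeping is replaced by a do/while loop around mg_end_request(), which returns true while bytes remain in the request and clears host->req once it is fully completed. A sketch of that completion pattern for the read side (mg_xfer_one_sector() is a hypothetical stand-in for the PIO loop over MG_BUFF_OFFSET):

static void mg_pio_read_sketch(struct mg_host *host)
{
        do {
                if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
                        mg_bad_rw_intr(host);    /* fail or retry the request */
                        return;
                }

                /* copy MG_SECTOR_SIZE bytes out of the mflash buffer window */
                mg_xfer_one_sector(host, host->req->buffer);

                outb(MG_CMD_RD_CONF,
                     (unsigned long)host->dev_base + MG_REG_COMMAND);

        /* true while the request has more sectors; host->req is cleared
         * (and the loop ends) once the whole request is completed */
        } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}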
@@ -427,60 +577,42 @@ static void mg_read_intr(struct mg_host *host)
427 577
428ok_to_read: 578ok_to_read:
429 /* get current segment of request */ 579 /* get current segment of request */
430 req = elv_next_request(host->breq); 580 buff = (u16 *)req->buffer;
431 581
432 /* read 1 sector */ 582 /* read 1 sector */
433 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) { 583 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
434 *(u16 *)req->buffer = 584 *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
435 inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + 585 (i << 1));
436 (i << 1));
437 req->buffer += 2;
438 }
439 586
440 /* manipulate request */
441 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 587 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
442 req->sector, req->nr_sectors - 1, req->buffer); 588 blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
443
444 req->sector++;
445 req->errors = 0;
446 i = --req->nr_sectors;
447 --req->current_nr_sectors;
448
449 /* let know if current segment done */
450 if (req->current_nr_sectors <= 0)
451 end_request(req, 1);
452
453 /* set handler if read remains */
454 if (i > 0) {
455 host->mg_do_intr = mg_read_intr;
456 mod_timer(&host->timer, jiffies + 3 * HZ);
457 }
458 589
459 /* send read confirm */ 590 /* send read confirm */
460 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 591 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
461 592
462 /* goto next request */ 593 if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
463 if (!i) 594 /* set handler if read remains */
595 host->mg_do_intr = mg_read_intr;
596 mod_timer(&host->timer, jiffies + 3 * HZ);
597 } else /* goto next request */
464 mg_request(host->breq); 598 mg_request(host->breq);
465} 599}
466 600
467static void mg_write_intr(struct mg_host *host) 601static void mg_write_intr(struct mg_host *host)
468{ 602{
603 struct request *req = host->req;
469 u32 i, j; 604 u32 i, j;
470 u16 *buff; 605 u16 *buff;
471 struct request *req; 606 bool rem;
472
473 /* get current segment of request */
474 req = elv_next_request(host->breq);
475 607
476 /* check status */ 608 /* check status */
477 do { 609 do {
478 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 610 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
479 if (i & MG_REG_STATUS_BIT_BUSY) 611 if (i & ATA_BUSY)
480 break; 612 break;
481 if (!MG_READY_OK(i)) 613 if (!MG_READY_OK(i))
482 break; 614 break;
483 if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ)) 615 if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
484 goto ok_to_write; 616 goto ok_to_write;
485 } while (0); 617 } while (0);
486 mg_dump_status("mg_write_intr", i, host); 618 mg_dump_status("mg_write_intr", i, host);
@@ -489,18 +621,8 @@ static void mg_write_intr(struct mg_host *host)
489 return; 621 return;
490 622
491ok_to_write: 623ok_to_write:
492 /* manipulate request */ 624 if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
493 req->sector++; 625 /* write 1 sector and set handler if remains */
494 i = --req->nr_sectors;
495 --req->current_nr_sectors;
496 req->buffer += MG_SECTOR_SIZE;
497
498 /* let know if current segment or all done */
499 if (!i || (req->bio && req->current_nr_sectors <= 0))
500 end_request(req, 1);
501
502 /* write 1 sector and set handler if remains */
503 if (i > 0) {
504 buff = (u16 *)req->buffer; 626 buff = (u16 *)req->buffer;
505 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) { 627 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
506 outw(*buff, (unsigned long)host->dev_base + 628 outw(*buff, (unsigned long)host->dev_base +
@@ -508,7 +630,7 @@ ok_to_write:
508 buff++; 630 buff++;
509 } 631 }
510 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 632 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
511 req->sector, req->nr_sectors, req->buffer); 633 blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
512 host->mg_do_intr = mg_write_intr; 634 host->mg_do_intr = mg_write_intr;
513 mod_timer(&host->timer, jiffies + 3 * HZ); 635 mod_timer(&host->timer, jiffies + 3 * HZ);
514 } 636 }
@@ -516,7 +638,7 @@ ok_to_write:
516 /* send write confirm */ 638 /* send write confirm */
517 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 639 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
518 640
519 if (!i) 641 if (!rem)
520 mg_request(host->breq); 642 mg_request(host->breq);
521} 643}
522 644
@@ -524,49 +646,45 @@ void mg_times_out(unsigned long data)
524{ 646{
525 struct mg_host *host = (struct mg_host *)data; 647 struct mg_host *host = (struct mg_host *)data;
526 char *name; 648 char *name;
527 struct request *req;
528 649
529 spin_lock_irq(&host->lock); 650 spin_lock_irq(&host->lock);
530 651
531 req = elv_next_request(host->breq); 652 if (!host->req)
532 if (!req)
533 goto out_unlock; 653 goto out_unlock;
534 654
535 host->mg_do_intr = NULL; 655 host->mg_do_intr = NULL;
536 656
537 name = req->rq_disk->disk_name; 657 name = host->req->rq_disk->disk_name;
538 printk(KERN_DEBUG "%s: timeout\n", name); 658 printk(KERN_DEBUG "%s: timeout\n", name);
539 659
540 host->error = MG_ERR_TIMEOUT; 660 host->error = MG_ERR_TIMEOUT;
541 mg_bad_rw_intr(host); 661 mg_bad_rw_intr(host);
542 662
543 mg_request(host->breq);
544out_unlock: 663out_unlock:
664 mg_request(host->breq);
545 spin_unlock_irq(&host->lock); 665 spin_unlock_irq(&host->lock);
546} 666}
547 667
548static void mg_request_poll(struct request_queue *q) 668static void mg_request_poll(struct request_queue *q)
549{ 669{
550 struct request *req; 670 struct mg_host *host = q->queuedata;
551 struct mg_host *host;
552 671
553 while ((req = elv_next_request(q)) != NULL) { 672 while (1) {
554 host = req->rq_disk->private_data; 673 if (!host->req) {
555 if (blk_fs_request(req)) { 674 host->req = blk_fetch_request(q);
556 switch (rq_data_dir(req)) { 675 if (!host->req)
557 case READ:
558 mg_read(req);
559 break;
560 case WRITE:
561 mg_write(req);
562 break;
563 default:
564 printk(KERN_WARNING "%s:%d unknown command\n",
565 __func__, __LINE__);
566 end_request(req, 0);
567 break; 676 break;
568 }
569 } 677 }
678
679 if (unlikely(!blk_fs_request(host->req))) {
680 mg_end_request_cur(host, -EIO);
681 continue;
682 }
683
684 if (rq_data_dir(host->req) == READ)
685 mg_read(host->req);
686 else
687 mg_write(host->req);
570 } 688 }
571} 689}
572 690
@@ -588,18 +706,15 @@ static unsigned int mg_issue_req(struct request *req,
588 break; 706 break;
589 case WRITE: 707 case WRITE:
590 /* TODO : handler */ 708 /* TODO : handler */
591 outb(MG_REG_CTRL_INTR_DISABLE, 709 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
592 (unsigned long)host->dev_base +
593 MG_REG_DRV_CTRL);
594 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) 710 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
595 != MG_ERR_NONE) { 711 != MG_ERR_NONE) {
596 mg_bad_rw_intr(host); 712 mg_bad_rw_intr(host);
597 return host->error; 713 return host->error;
598 } 714 }
599 del_timer(&host->timer); 715 del_timer(&host->timer);
600 mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ); 716 mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
601 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 717 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
602 MG_REG_DRV_CTRL);
603 if (host->error) { 718 if (host->error) {
604 mg_bad_rw_intr(host); 719 mg_bad_rw_intr(host);
605 return host->error; 720 return host->error;
@@ -614,11 +729,6 @@ static unsigned int mg_issue_req(struct request *req,
614 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 729 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
615 MG_REG_COMMAND); 730 MG_REG_COMMAND);
616 break; 731 break;
617 default:
618 printk(KERN_WARNING "%s:%d unknown command\n",
619 __func__, __LINE__);
620 end_request(req, 0);
621 break;
622 } 732 }
623 return MG_ERR_NONE; 733 return MG_ERR_NONE;
624} 734}
@@ -626,16 +736,17 @@ static unsigned int mg_issue_req(struct request *req,
626/* This function also called from IRQ context */ 736/* This function also called from IRQ context */
627static void mg_request(struct request_queue *q) 737static void mg_request(struct request_queue *q)
628{ 738{
739 struct mg_host *host = q->queuedata;
629 struct request *req; 740 struct request *req;
630 struct mg_host *host;
631 u32 sect_num, sect_cnt; 741 u32 sect_num, sect_cnt;
632 742
633 while (1) { 743 while (1) {
634 req = elv_next_request(q); 744 if (!host->req) {
635 if (!req) 745 host->req = blk_fetch_request(q);
636 return; 746 if (!host->req)
637 747 break;
638 host = req->rq_disk->private_data; 748 }
749 req = host->req;
639 750
640 /* check unwanted request call */ 751 /* check unwanted request call */
641 if (host->mg_do_intr) 752 if (host->mg_do_intr)
@@ -643,9 +754,9 @@ static void mg_request(struct request_queue *q)
643 754
644 del_timer(&host->timer); 755 del_timer(&host->timer);
645 756
646 sect_num = req->sector; 757 sect_num = blk_rq_pos(req);
647 /* deal whole segments */ 758 /* deal whole segments */
648 sect_cnt = req->nr_sectors; 759 sect_cnt = blk_rq_sectors(req);
649 760
650 /* sanity check */ 761 /* sanity check */
651 if (sect_num >= get_capacity(req->rq_disk) || 762 if (sect_num >= get_capacity(req->rq_disk) ||
@@ -655,12 +766,14 @@ static void mg_request(struct request_queue *q)
655 "%s: bad access: sector=%d, count=%d\n", 766 "%s: bad access: sector=%d, count=%d\n",
656 req->rq_disk->disk_name, 767 req->rq_disk->disk_name,
657 sect_num, sect_cnt); 768 sect_num, sect_cnt);
658 end_request(req, 0); 769 mg_end_request_cur(host, -EIO);
659 continue; 770 continue;
660 } 771 }
661 772
662 if (!blk_fs_request(req)) 773 if (unlikely(!blk_fs_request(req))) {
663 return; 774 mg_end_request_cur(host, -EIO);
775 continue;
776 }
664 777
665 if (!mg_issue_req(req, host, sect_num, sect_cnt)) 778 if (!mg_issue_req(req, host, sect_num, sect_cnt))
666 return; 779 return;
@@ -690,9 +803,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
690 return -EIO; 803 return -EIO;
691 804
692 if (!prv_data->use_polling) 805 if (!prv_data->use_polling)
693 outb(MG_REG_CTRL_INTR_DISABLE, 806 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
694 (unsigned long)host->dev_base +
695 MG_REG_DRV_CTRL);
696 807
697 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); 808 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
698 /* wait until mflash deep sleep */ 809 /* wait until mflash deep sleep */
@@ -700,9 +811,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
700 811
701 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { 812 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
702 if (!prv_data->use_polling) 813 if (!prv_data->use_polling)
703 outb(MG_REG_CTRL_INTR_ENABLE, 814 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
704 (unsigned long)host->dev_base +
705 MG_REG_DRV_CTRL);
706 return -EIO; 815 return -EIO;
707 } 816 }
708 817
@@ -725,8 +834,7 @@ static int mg_resume(struct platform_device *plat_dev)
725 return -EIO; 834 return -EIO;
726 835
727 if (!prv_data->use_polling) 836 if (!prv_data->use_polling)
728 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 837 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
729 MG_REG_DRV_CTRL);
730 838
731 return 0; 839 return 0;
732} 840}
@@ -877,6 +985,7 @@ static int mg_probe(struct platform_device *plat_dev)
877 __func__, __LINE__); 985 __func__, __LINE__);
878 goto probe_err_5; 986 goto probe_err_5;
879 } 987 }
988 host->breq->queuedata = host;
880 989
881 /* mflash is random device, thanx for the noop */ 990 /* mflash is random device, thanx for the noop */
882 elevator_exit(host->breq->elevator); 991 elevator_exit(host->breq->elevator);
@@ -887,7 +996,7 @@ static int mg_probe(struct platform_device *plat_dev)
887 goto probe_err_6; 996 goto probe_err_6;
888 } 997 }
889 blk_queue_max_sectors(host->breq, MG_MAX_SECTS); 998 blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
890 blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE); 999 blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
891 1000
892 init_timer(&host->timer); 1001 init_timer(&host->timer);
893 host->timer.function = mg_times_out; 1002 host->timer.function = mg_times_out;
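
The mg_disk conversion above is the template several of the following drivers use: instead of calling elv_next_request() on every pass, the driver caches the in-flight request (host->req here), fetches a new one with blk_fetch_request() only after the previous one has been fully completed, and finishes work chunk by chunk with __blk_end_request_cur(). Below is a minimal sketch of that pattern, not the mg_disk code itself; struct my_host, my_request_fn() and the q->queuedata hookup are illustrative stand-ins.

    #include <linux/blkdev.h>

    struct my_host {
        struct request *req;    /* request currently being served, if any */
    };

    /* Finish the current chunk; forget the request once it is fully done. */
    static void my_end_request_cur(struct my_host *host, int err)
    {
        if (!__blk_end_request_cur(host->req, err))
            host->req = NULL;
    }

    /* request_fn, entered with the queue lock held */
    static void my_request_fn(struct request_queue *q)
    {
        struct my_host *host = q->queuedata;    /* set at probe time */

        while (1) {
            if (!host->req) {
                host->req = blk_fetch_request(q);
                if (!host->req)
                    break;
            }

            if (unlikely(!blk_fs_request(host->req))) {
                my_end_request_cur(host, -EIO);
                continue;
            }

            /*
             * Start the transfer described by blk_rq_pos() and
             * blk_rq_cur_sectors(); the completion handler later calls
             * my_end_request_cur() and, once host->req is cleared,
             * re-enters this function for the next request.
             */
            break;
        }
    }
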
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d6de4f15ccb..5d23ffad7c77 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
110 req, error ? "failed" : "done"); 110 req, error ? "failed" : "done");
111 111
112 spin_lock_irqsave(q->queue_lock, flags); 112 spin_lock_irqsave(q->queue_lock, flags);
113 __blk_end_request(req, error, req->nr_sectors << 9); 113 __blk_end_request_all(req, error);
114 spin_unlock_irqrestore(q->queue_lock, flags); 114 spin_unlock_irqrestore(q->queue_lock, flags);
115} 115}
116 116
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
231{ 231{
232 int result, flags; 232 int result, flags;
233 struct nbd_request request; 233 struct nbd_request request;
234 unsigned long size = req->nr_sectors << 9; 234 unsigned long size = blk_rq_bytes(req);
235 235
236 request.magic = htonl(NBD_REQUEST_MAGIC); 236 request.magic = htonl(NBD_REQUEST_MAGIC);
237 request.type = htonl(nbd_cmd(req)); 237 request.type = htonl(nbd_cmd(req));
238 request.from = cpu_to_be64((u64) req->sector << 9); 238 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
239 request.len = htonl(size); 239 request.len = htonl(size);
240 memcpy(request.handle, &req, sizeof(req)); 240 memcpy(request.handle, &req, sizeof(req));
241 241
242 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", 242 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
243 lo->disk->disk_name, req, 243 lo->disk->disk_name, req,
244 nbdcmd_to_ascii(nbd_cmd(req)), 244 nbdcmd_to_ascii(nbd_cmd(req)),
245 (unsigned long long)req->sector << 9, 245 (unsigned long long)blk_rq_pos(req) << 9,
246 req->nr_sectors << 9); 246 blk_rq_bytes(req));
247 result = sock_xmit(lo, 1, &request, sizeof(request), 247 result = sock_xmit(lo, 1, &request, sizeof(request),
248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); 248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
249 if (result <= 0) { 249 if (result <= 0) {
@@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
533{ 533{
534 struct request *req; 534 struct request *req;
535 535
536 while ((req = elv_next_request(q)) != NULL) { 536 while ((req = blk_fetch_request(q)) != NULL) {
537 struct nbd_device *lo; 537 struct nbd_device *lo;
538 538
539 blkdev_dequeue_request(req);
540
541 spin_unlock_irq(q->queue_lock); 539 spin_unlock_irq(q->queue_lock);
542 540
543 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", 541 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
580 blk_rq_init(NULL, &sreq); 578 blk_rq_init(NULL, &sreq);
581 sreq.cmd_type = REQ_TYPE_SPECIAL; 579 sreq.cmd_type = REQ_TYPE_SPECIAL;
582 nbd_cmd(&sreq) = NBD_CMD_DISC; 580 nbd_cmd(&sreq) = NBD_CMD_DISC;
583 /*
584 * Set these to sane values in case server implementation
585 * fails to check the request type first and also to keep
586 * debugging output cleaner.
587 */
588 sreq.sector = 0;
589 sreq.nr_sectors = 0;
590 if (!lo->sock) 581 if (!lo->sock)
591 return -EINVAL; 582 return -EINVAL;
592 nbd_send_req(lo, &sreq); 583 nbd_send_req(lo, &sreq);
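
The nbd hunks show the simpler, one-shot variant of the same conversion: blk_fetch_request() returns an already-dequeued request (folding in the old blkdev_dequeue_request() call), and __blk_end_request_all() completes the whole request without the caller supplying nr_sectors << 9. A minimal sketch, with my_transport_send() as a hypothetical stand-in for the driver's submission path:

    #include <linux/blkdev.h>

    /* hypothetical submission routine; returns 0 or a negative errno */
    static int my_transport_send(struct request *req)
    {
        return -EIO;    /* pretend the transport is unreachable */
    }

    static void my_do_request(struct request_queue *q)
    {
        struct request *req;

        /* blk_fetch_request() == blk_peek_request() + blk_start_request() */
        while ((req = blk_fetch_request(q)) != NULL) {
            if (my_transport_send(req) < 0)
                __blk_end_request_all(req, -EIO);
        }
    }
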
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e91d4b4b014f..911dfd98d813 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
719 if (pcd_busy) 719 if (pcd_busy)
720 return; 720 return;
721 while (1) { 721 while (1) {
722 pcd_req = elv_next_request(q); 722 if (!pcd_req) {
723 if (!pcd_req) 723 pcd_req = blk_fetch_request(q);
724 return; 724 if (!pcd_req)
725 return;
726 }
725 727
726 if (rq_data_dir(pcd_req) == READ) { 728 if (rq_data_dir(pcd_req) == READ) {
727 struct pcd_unit *cd = pcd_req->rq_disk->private_data; 729 struct pcd_unit *cd = pcd_req->rq_disk->private_data;
728 if (cd != pcd_current) 730 if (cd != pcd_current)
729 pcd_bufblk = -1; 731 pcd_bufblk = -1;
730 pcd_current = cd; 732 pcd_current = cd;
731 pcd_sector = pcd_req->sector; 733 pcd_sector = blk_rq_pos(pcd_req);
732 pcd_count = pcd_req->current_nr_sectors; 734 pcd_count = blk_rq_cur_sectors(pcd_req);
733 pcd_buf = pcd_req->buffer; 735 pcd_buf = pcd_req->buffer;
734 pcd_busy = 1; 736 pcd_busy = 1;
735 ps_set_intr(do_pcd_read, NULL, 0, nice); 737 ps_set_intr(do_pcd_read, NULL, 0, nice);
736 return; 738 return;
737 } else 739 } else {
738 end_request(pcd_req, 0); 740 __blk_end_request_all(pcd_req, -EIO);
741 pcd_req = NULL;
742 }
739 } 743 }
740} 744}
741 745
742static inline void next_request(int success) 746static inline void next_request(int err)
743{ 747{
744 unsigned long saved_flags; 748 unsigned long saved_flags;
745 749
746 spin_lock_irqsave(&pcd_lock, saved_flags); 750 spin_lock_irqsave(&pcd_lock, saved_flags);
747 end_request(pcd_req, success); 751 if (!__blk_end_request_cur(pcd_req, err))
752 pcd_req = NULL;
748 pcd_busy = 0; 753 pcd_busy = 0;
749 do_pcd_request(pcd_queue); 754 do_pcd_request(pcd_queue);
750 spin_unlock_irqrestore(&pcd_lock, saved_flags); 755 spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -781,7 +786,7 @@ static void pcd_start(void)
781 786
782 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { 787 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
783 pcd_bufblk = -1; 788 pcd_bufblk = -1;
784 next_request(0); 789 next_request(-EIO);
785 return; 790 return;
786 } 791 }
787 792
@@ -796,7 +801,7 @@ static void do_pcd_read(void)
796 pcd_retries = 0; 801 pcd_retries = 0;
797 pcd_transfer(); 802 pcd_transfer();
798 if (!pcd_count) { 803 if (!pcd_count) {
799 next_request(1); 804 next_request(0);
800 return; 805 return;
801 } 806 }
802 807
@@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
815 return; 820 return;
816 } 821 }
817 pcd_bufblk = -1; 822 pcd_bufblk = -1;
818 next_request(0); 823 next_request(-EIO);
819 return; 824 return;
820 } 825 }
821 826
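
Two things change in pcd's completion path above. First, the old uptodate flag of end_request() (1 for success, 0 for failure) becomes an errno: 0 on success, -EIO on failure, which is why next_request() now takes an err argument. Second, __blk_end_request_cur() reports whether the request still has data pending, so the driver only drops its cached pointer when the request is really finished. A sketch of that shape, assuming (as with pcd_lock) that the driver's private lock is also the lock handed to blk_init_queue(); all my_* names are hypothetical:

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);        /* also the queue lock */
    static struct request_queue *my_queue;  /* set up at init time */
    static struct request *my_req;          /* current request, if any */
    static int my_busy;

    static void my_do_request(struct request_queue *q)
    {
        if (my_busy)
            return;
        if (!my_req)
            my_req = blk_fetch_request(q);
        if (!my_req)
            return;
        my_busy = 1;
        /* ... start the transfer of blk_rq_cur_sectors(my_req) sectors ... */
    }

    /* called when the chunk in flight has finished (or failed) */
    static void my_next_request(int err)
    {
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);
        if (!__blk_end_request_cur(my_req, err))
            my_req = NULL;          /* fully completed */
        my_busy = 0;
        my_do_request(my_queue);    /* move on to the next chunk */
        spin_unlock_irqrestore(&my_lock, flags);
    }
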
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9299455b0af6..bf5955b3d873 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -410,10 +410,12 @@ static void run_fsm(void)
410 pd_claimed = 0; 410 pd_claimed = 0;
411 phase = NULL; 411 phase = NULL;
412 spin_lock_irqsave(&pd_lock, saved_flags); 412 spin_lock_irqsave(&pd_lock, saved_flags);
413 end_request(pd_req, res); 413 if (!__blk_end_request_cur(pd_req,
414 pd_req = elv_next_request(pd_queue); 414 res == Ok ? 0 : -EIO)) {
415 if (!pd_req) 415 pd_req = blk_fetch_request(pd_queue);
416 stop = 1; 416 if (!pd_req)
417 stop = 1;
418 }
417 spin_unlock_irqrestore(&pd_lock, saved_flags); 419 spin_unlock_irqrestore(&pd_lock, saved_flags);
418 if (stop) 420 if (stop)
419 return; 421 return;
@@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
443 445
444 pd_cmd = rq_data_dir(pd_req); 446 pd_cmd = rq_data_dir(pd_req);
445 if (pd_cmd == READ || pd_cmd == WRITE) { 447 if (pd_cmd == READ || pd_cmd == WRITE) {
446 pd_block = pd_req->sector; 448 pd_block = blk_rq_pos(pd_req);
447 pd_count = pd_req->current_nr_sectors; 449 pd_count = blk_rq_cur_sectors(pd_req);
448 if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) 450 if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
449 return Fail; 451 return Fail;
450 pd_run = pd_req->nr_sectors; 452 pd_run = blk_rq_sectors(pd_req);
451 pd_buf = pd_req->buffer; 453 pd_buf = pd_req->buffer;
452 pd_retries = 0; 454 pd_retries = 0;
453 if (pd_cmd == READ) 455 if (pd_cmd == READ)
@@ -477,8 +479,8 @@ static int pd_next_buf(void)
477 if (pd_count) 479 if (pd_count)
478 return 0; 480 return 0;
479 spin_lock_irqsave(&pd_lock, saved_flags); 481 spin_lock_irqsave(&pd_lock, saved_flags);
480 end_request(pd_req, 1); 482 __blk_end_request_cur(pd_req, 0);
481 pd_count = pd_req->current_nr_sectors; 483 pd_count = blk_rq_cur_sectors(pd_req);
482 pd_buf = pd_req->buffer; 484 pd_buf = pd_req->buffer;
483 spin_unlock_irqrestore(&pd_lock, saved_flags); 485 spin_unlock_irqrestore(&pd_lock, saved_flags);
484 return 0; 486 return 0;
@@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
702{ 704{
703 if (pd_req) 705 if (pd_req)
704 return; 706 return;
705 pd_req = elv_next_request(q); 707 pd_req = blk_fetch_request(q);
706 if (!pd_req) 708 if (!pd_req)
707 return; 709 return;
708 710
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bef3b997ba3e..68a90834e993 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,12 +750,10 @@ static int pf_ready(void)
750 750
751static struct request_queue *pf_queue; 751static struct request_queue *pf_queue;
752 752
753static void pf_end_request(int uptodate) 753static void pf_end_request(int err)
754{ 754{
755 if (pf_req) { 755 if (pf_req && !__blk_end_request_cur(pf_req, err))
756 end_request(pf_req, uptodate);
757 pf_req = NULL; 756 pf_req = NULL;
758 }
759} 757}
760 758
761static void do_pf_request(struct request_queue * q) 759static void do_pf_request(struct request_queue * q)
@@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
763 if (pf_busy) 761 if (pf_busy)
764 return; 762 return;
765repeat: 763repeat:
766 pf_req = elv_next_request(q); 764 if (!pf_req) {
767 if (!pf_req) 765 pf_req = blk_fetch_request(q);
768 return; 766 if (!pf_req)
767 return;
768 }
769 769
770 pf_current = pf_req->rq_disk->private_data; 770 pf_current = pf_req->rq_disk->private_data;
771 pf_block = pf_req->sector; 771 pf_block = blk_rq_pos(pf_req);
772 pf_run = pf_req->nr_sectors; 772 pf_run = blk_rq_sectors(pf_req);
773 pf_count = pf_req->current_nr_sectors; 773 pf_count = blk_rq_cur_sectors(pf_req);
774 774
775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { 775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
776 pf_end_request(0); 776 pf_end_request(-EIO);
777 goto repeat; 777 goto repeat;
778 } 778 }
779 779
@@ -788,7 +788,7 @@ repeat:
788 pi_do_claimed(pf_current->pi, do_pf_write); 788 pi_do_claimed(pf_current->pi, do_pf_write);
789 else { 789 else {
790 pf_busy = 0; 790 pf_busy = 0;
791 pf_end_request(0); 791 pf_end_request(-EIO);
792 goto repeat; 792 goto repeat;
793 } 793 }
794} 794}
@@ -805,23 +805,22 @@ static int pf_next_buf(void)
805 return 1; 805 return 1;
806 if (!pf_count) { 806 if (!pf_count) {
807 spin_lock_irqsave(&pf_spin_lock, saved_flags); 807 spin_lock_irqsave(&pf_spin_lock, saved_flags);
808 pf_end_request(1); 808 pf_end_request(0);
809 pf_req = elv_next_request(pf_queue);
810 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 809 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
811 if (!pf_req) 810 if (!pf_req)
812 return 1; 811 return 1;
813 pf_count = pf_req->current_nr_sectors; 812 pf_count = blk_rq_cur_sectors(pf_req);
814 pf_buf = pf_req->buffer; 813 pf_buf = pf_req->buffer;
815 } 814 }
816 return 0; 815 return 0;
817} 816}
818 817
819static inline void next_request(int success) 818static inline void next_request(int err)
820{ 819{
821 unsigned long saved_flags; 820 unsigned long saved_flags;
822 821
823 spin_lock_irqsave(&pf_spin_lock, saved_flags); 822 spin_lock_irqsave(&pf_spin_lock, saved_flags);
824 pf_end_request(success); 823 pf_end_request(err);
825 pf_busy = 0; 824 pf_busy = 0;
826 do_pf_request(pf_queue); 825 do_pf_request(pf_queue);
827 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 826 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@@ -844,7 +843,7 @@ static void do_pf_read_start(void)
844 pi_do_claimed(pf_current->pi, do_pf_read_start); 843 pi_do_claimed(pf_current->pi, do_pf_read_start);
845 return; 844 return;
846 } 845 }
847 next_request(0); 846 next_request(-EIO);
848 return; 847 return;
849 } 848 }
850 pf_mask = STAT_DRQ; 849 pf_mask = STAT_DRQ;
@@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
863 pi_do_claimed(pf_current->pi, do_pf_read_start); 862 pi_do_claimed(pf_current->pi, do_pf_read_start);
864 return; 863 return;
865 } 864 }
866 next_request(0); 865 next_request(-EIO);
867 return; 866 return;
868 } 867 }
869 pi_read_block(pf_current->pi, pf_buf, 512); 868 pi_read_block(pf_current->pi, pf_buf, 512);
@@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
871 break; 870 break;
872 } 871 }
873 pi_disconnect(pf_current->pi); 872 pi_disconnect(pf_current->pi);
874 next_request(1); 873 next_request(0);
875} 874}
876 875
877static void do_pf_write(void) 876static void do_pf_write(void)
@@ -890,7 +889,7 @@ static void do_pf_write_start(void)
890 pi_do_claimed(pf_current->pi, do_pf_write_start); 889 pi_do_claimed(pf_current->pi, do_pf_write_start);
891 return; 890 return;
892 } 891 }
893 next_request(0); 892 next_request(-EIO);
894 return; 893 return;
895 } 894 }
896 895
@@ -903,7 +902,7 @@ static void do_pf_write_start(void)
903 pi_do_claimed(pf_current->pi, do_pf_write_start); 902 pi_do_claimed(pf_current->pi, do_pf_write_start);
904 return; 903 return;
905 } 904 }
906 next_request(0); 905 next_request(-EIO);
907 return; 906 return;
908 } 907 }
909 pi_write_block(pf_current->pi, pf_buf, 512); 908 pi_write_block(pf_current->pi, pf_buf, 512);
@@ -923,11 +922,11 @@ static void do_pf_write_done(void)
923 pi_do_claimed(pf_current->pi, do_pf_write_start); 922 pi_do_claimed(pf_current->pi, do_pf_write_start);
924 return; 923 return;
925 } 924 }
926 next_request(0); 925 next_request(-EIO);
927 return; 926 return;
928 } 927 }
929 pi_disconnect(pf_current->pi); 928 pi_disconnect(pf_current->pi);
930 next_request(1); 929 next_request(0);
931} 930}
932 931
933static int __init pf_init(void) 932static int __init pf_init(void)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index dc7a8c352da2..d57f11759480 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
991 */ 991 */
992static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) 992static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
993{ 993{
994 if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) { 994 if ((pd->settings.size << 9) / CD_FRAMESIZE
995 <= queue_max_phys_segments(q)) {
995 /* 996 /*
996 * The cdrom device can handle one segment/frame 997 * The cdrom device can handle one segment/frame
997 */ 998 */
998 clear_bit(PACKET_MERGE_SEGS, &pd->flags); 999 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
999 return 0; 1000 return 0;
1000 } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) { 1001 } else if ((pd->settings.size << 9) / PAGE_SIZE
1002 <= queue_max_phys_segments(q)) {
1001 /* 1003 /*
1002 * We can handle this case at the expense of some extra memory 1004 * We can handle this case at the expense of some extra memory
1003 * copies during write operations 1005 * copies during write operations
@@ -2657,7 +2659,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
2657 struct request_queue *q = pd->disk->queue; 2659 struct request_queue *q = pd->disk->queue;
2658 2660
2659 blk_queue_make_request(q, pkt_make_request); 2661 blk_queue_make_request(q, pkt_make_request);
2660 blk_queue_hardsect_size(q, CD_FRAMESIZE); 2662 blk_queue_logical_block_size(q, CD_FRAMESIZE);
2661 blk_queue_max_sectors(q, PACKET_MAX_SECTORS); 2663 blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
2662 blk_queue_merge_bvec(q, pkt_merge_bvec); 2664 blk_queue_merge_bvec(q, pkt_merge_bvec);
2663 q->queuedata = pd; 2665 q->queuedata = pd;
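
Besides the request accessors, the series renames blk_queue_hardsect_size() to blk_queue_logical_block_size() and routes queue-limit reads through helpers such as queue_max_phys_segments() instead of touching request_queue fields directly, which is what the pktcdvd hunks above do. A short sketch of queue setup in the new style; the numeric limits are placeholders, not values taken from any of these drivers:

    #include <linux/blkdev.h>

    static void my_setup_queue(struct request_queue *q)
    {
        /* formerly blk_queue_hardsect_size() */
        blk_queue_logical_block_size(q, 2048);
        blk_queue_max_sectors(q, 128);

        /* read limits through accessors, not q->max_phys_segments */
        if (queue_max_phys_segments(q) < 4)
            printk(KERN_INFO "fewer than 4 segments per request\n");
    }
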
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index bccc42bb9212..aaeeb544228a 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
134 rq_for_each_segment(bv, req, iter) 134 rq_for_each_segment(bv, req, iter)
135 n++; 135 n++;
136 dev_dbg(&dev->sbd.core, 136 dev_dbg(&dev->sbd.core,
137 "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n", 137 "%s:%u: %s req has %u bvecs for %u sectors\n",
138 __func__, __LINE__, op, n, req->nr_sectors, 138 __func__, __LINE__, op, n, blk_rq_sectors(req));
139 req->hard_nr_sectors);
140#endif 139#endif
141 140
142 start_sector = req->sector * priv->blocking_factor; 141 start_sector = blk_rq_pos(req) * priv->blocking_factor;
143 sectors = req->nr_sectors * priv->blocking_factor; 142 sectors = blk_rq_sectors(req) * priv->blocking_factor;
144 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n", 143 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
145 __func__, __LINE__, op, sectors, start_sector); 144 __func__, __LINE__, op, sectors, start_sector);
146 145
@@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
158 if (res) { 157 if (res) {
159 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, 158 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
160 __LINE__, op, res); 159 __LINE__, op, res);
161 end_request(req, 0); 160 __blk_end_request_all(req, -EIO);
162 return 0; 161 return 0;
163 } 162 }
164 163
@@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
180 if (res) { 179 if (res) {
181 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", 180 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
182 __func__, __LINE__, res); 181 __func__, __LINE__, res);
183 end_request(req, 0); 182 __blk_end_request_all(req, -EIO);
184 return 0; 183 return 0;
185 } 184 }
186 185
@@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
195 194
196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 195 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
197 196
198 while ((req = elv_next_request(q))) { 197 while ((req = blk_fetch_request(q))) {
199 if (blk_fs_request(req)) { 198 if (blk_fs_request(req)) {
200 if (ps3disk_submit_request_sg(dev, req)) 199 if (ps3disk_submit_request_sg(dev, req))
201 break; 200 break;
@@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
205 break; 204 break;
206 } else { 205 } else {
207 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); 206 blk_dump_rq_flags(req, DEVICE_NAME " bad request");
208 end_request(req, 0); 207 __blk_end_request_all(req, -EIO);
209 continue; 208 continue;
210 } 209 }
211 } 210 }
@@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
231 struct request *req; 230 struct request *req;
232 int res, read, error; 231 int res, read, error;
233 u64 tag, status; 232 u64 tag, status;
234 unsigned long num_sectors;
235 const char *op; 233 const char *op;
236 234
237 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); 235 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
261 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 259 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
262 req->cmd[0] == REQ_LB_OP_FLUSH) { 260 req->cmd[0] == REQ_LB_OP_FLUSH) {
263 read = 0; 261 read = 0;
264 num_sectors = req->hard_cur_sectors;
265 op = "flush"; 262 op = "flush";
266 } else { 263 } else {
267 read = !rq_data_dir(req); 264 read = !rq_data_dir(req);
268 num_sectors = req->nr_sectors;
269 op = read ? "read" : "write"; 265 op = read ? "read" : "write";
270 } 266 }
271 if (status) { 267 if (status) {
@@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
281 } 277 }
282 278
283 spin_lock(&priv->lock); 279 spin_lock(&priv->lock);
284 __blk_end_request(req, error, num_sectors << 9); 280 __blk_end_request_all(req, error);
285 priv->req = NULL; 281 priv->req = NULL;
286 ps3disk_do_request(dev, priv->queue); 282 ps3disk_do_request(dev, priv->queue);
287 spin_unlock(&priv->lock); 283 spin_unlock(&priv->lock);
@@ -481,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
481 blk_queue_max_sectors(queue, dev->bounce_size >> 9); 477 blk_queue_max_sectors(queue, dev->bounce_size >> 9);
482 blk_queue_segment_boundary(queue, -1UL); 478 blk_queue_segment_boundary(queue, -1UL);
483 blk_queue_dma_alignment(queue, dev->blk_size-1); 479 blk_queue_dma_alignment(queue, dev->blk_size-1);
484 blk_queue_hardsect_size(queue, dev->blk_size); 480 blk_queue_logical_block_size(queue, dev->blk_size);
485 481
486 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, 482 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
487 ps3disk_prepare_flush); 483 ps3disk_prepare_flush);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5861e33efe63..cbfd9c0aef03 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD); 212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
213} 213}
214 214
215static void vdc_end_request(struct request *req, int error, int num_sectors)
216{
217 __blk_end_request(req, error, num_sectors << 9);
218}
219
220static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, 215static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
221 unsigned int index) 216 unsigned int index)
222{ 217{
@@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
239 234
240 rqe->req = NULL; 235 rqe->req = NULL;
241 236
242 vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9); 237 __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
243 238
244 if (blk_queue_stopped(port->disk->queue)) 239 if (blk_queue_stopped(port->disk->queue))
245 blk_start_queue(port->disk->queue); 240 blk_start_queue(port->disk->queue);
@@ -421,7 +416,7 @@ static int __send_request(struct request *req)
421 desc->slice = 0; 416 desc->slice = 0;
422 } 417 }
423 desc->status = ~0; 418 desc->status = ~0;
424 desc->offset = (req->sector << 9) / port->vdisk_block_size; 419 desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
425 desc->size = len; 420 desc->size = len;
426 desc->ncookies = err; 421 desc->ncookies = err;
427 422
@@ -446,14 +441,13 @@ out:
446static void do_vdc_request(struct request_queue *q) 441static void do_vdc_request(struct request_queue *q)
447{ 442{
448 while (1) { 443 while (1) {
449 struct request *req = elv_next_request(q); 444 struct request *req = blk_fetch_request(q);
450 445
451 if (!req) 446 if (!req)
452 break; 447 break;
453 448
454 blkdev_dequeue_request(req);
455 if (__send_request(req) < 0) 449 if (__send_request(req) < 0)
456 vdc_end_request(req, -EIO, req->hard_nr_sectors); 450 __blk_end_request_all(req, -EIO);
457 } 451 }
458} 452}
459 453
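
The sunvdc and ps3disk changes are mostly mechanical substitutions of the new request accessors for the old struct fields. For reference, the mapping used throughout this diff, shown as a sketch on a generic request:

    #include <linux/blkdev.h>

    static void my_describe_request(struct request *req)
    {
        sector_t pos = blk_rq_pos(req);              /* was req->sector */
        unsigned int sectors = blk_rq_sectors(req);  /* was req->nr_sectors */
        unsigned int cur = blk_rq_cur_sectors(req);  /* was req->current_nr_sectors */
        unsigned int bytes = blk_rq_bytes(req);      /* was nr_sectors << 9, or data_len for pc requests */

        printk(KERN_DEBUG "req %p: sector %llu, %u sectors (%u in this segment), %u bytes\n",
               req, (unsigned long long)pos, sectors, cur, bytes);
    }
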
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index d22cc3856937..cf7877fb8a7d 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
514 ret = swim_read_sector(fs, side, track, sector, 514 ret = swim_read_sector(fs, side, track, sector,
515 buffer); 515 buffer);
516 if (try-- == 0) 516 if (try-- == 0)
517 return -1; 517 return -EIO;
518 } while (ret != 512); 518 } while (ret != 512);
519 519
520 buffer += ret; 520 buffer += ret;
@@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
528 struct request *req; 528 struct request *req;
529 struct floppy_state *fs; 529 struct floppy_state *fs;
530 530
531 while ((req = elv_next_request(q))) { 531 req = blk_fetch_request(q);
532 while (req) {
533 int err = -EIO;
532 534
533 fs = req->rq_disk->private_data; 535 fs = req->rq_disk->private_data;
534 if (req->sector < 0 || req->sector >= fs->total_secs) { 536 if (blk_rq_pos(req) >= fs->total_secs)
535 end_request(req, 0); 537 goto done;
536 continue; 538 if (!fs->disk_in)
537 } 539 goto done;
538 if (req->current_nr_sectors == 0) { 540 if (rq_data_dir(req) == WRITE && fs->write_protected)
539 end_request(req, 1); 541 goto done;
540 continue; 542
541 }
542 if (!fs->disk_in) {
543 end_request(req, 0);
544 continue;
545 }
546 if (rq_data_dir(req) == WRITE) {
547 if (fs->write_protected) {
548 end_request(req, 0);
549 continue;
550 }
551 }
552 switch (rq_data_dir(req)) { 543 switch (rq_data_dir(req)) {
553 case WRITE: 544 case WRITE:
554 /* NOT IMPLEMENTED */ 545 /* NOT IMPLEMENTED */
555 end_request(req, 0);
556 break; 546 break;
557 case READ: 547 case READ:
558 if (floppy_read_sectors(fs, req->sector, 548 err = floppy_read_sectors(fs, blk_rq_pos(req),
559 req->current_nr_sectors, 549 blk_rq_cur_sectors(req),
560 req->buffer)) { 550 req->buffer);
561 end_request(req, 0);
562 continue;
563 }
564 req->nr_sectors -= req->current_nr_sectors;
565 req->sector += req->current_nr_sectors;
566 req->buffer += req->current_nr_sectors * 512;
567 end_request(req, 1);
568 break; 551 break;
569 } 552 }
553 done:
554 if (!__blk_end_request_cur(req, err))
555 req = blk_fetch_request(q);
570 } 556 }
571} 557}
572 558
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 612965307ba0..80df93e3cdd0 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
251static int floppy_check_change(struct gendisk *disk); 251static int floppy_check_change(struct gendisk *disk);
252static int floppy_revalidate(struct gendisk *disk); 252static int floppy_revalidate(struct gendisk *disk);
253 253
254static bool swim3_end_request(int err, unsigned int nr_bytes)
255{
256 if (__blk_end_request(fd_req, err, nr_bytes))
257 return true;
258
259 fd_req = NULL;
260 return false;
261}
262
263static bool swim3_end_request_cur(int err)
264{
265 return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
266}
267
254static void swim3_select(struct floppy_state *fs, int sel) 268static void swim3_select(struct floppy_state *fs, int sel)
255{ 269{
256 struct swim3 __iomem *sw = fs->swim3; 270 struct swim3 __iomem *sw = fs->swim3;
@@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
310 wake_up(&fs->wait); 324 wake_up(&fs->wait);
311 return; 325 return;
312 } 326 }
313 while (fs->state == idle && (req = elv_next_request(swim3_queue))) { 327 while (fs->state == idle) {
328 if (!fd_req) {
329 fd_req = blk_fetch_request(swim3_queue);
330 if (!fd_req)
331 break;
332 }
333 req = fd_req;
314#if 0 334#if 0
315 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n", 335 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
316 req->rq_disk->disk_name, req->cmd, 336 req->rq_disk->disk_name, req->cmd,
317 (long)req->sector, req->nr_sectors, req->buffer); 337 (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
318 printk(" errors=%d current_nr_sectors=%ld\n", 338 printk(" errors=%d current_nr_sectors=%u\n",
319 req->errors, req->current_nr_sectors); 339 req->errors, blk_rq_cur_sectors(req));
320#endif 340#endif
321 341
322 if (req->sector < 0 || req->sector >= fs->total_secs) { 342 if (blk_rq_pos(req) >= fs->total_secs) {
323 end_request(req, 0); 343 swim3_end_request_cur(-EIO);
324 continue;
325 }
326 if (req->current_nr_sectors == 0) {
327 end_request(req, 1);
328 continue; 344 continue;
329 } 345 }
330 if (fs->ejected) { 346 if (fs->ejected) {
331 end_request(req, 0); 347 swim3_end_request_cur(-EIO);
332 continue; 348 continue;
333 } 349 }
334 350
@@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
336 if (fs->write_prot < 0) 352 if (fs->write_prot < 0)
337 fs->write_prot = swim3_readbit(fs, WRITE_PROT); 353 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
338 if (fs->write_prot) { 354 if (fs->write_prot) {
339 end_request(req, 0); 355 swim3_end_request_cur(-EIO);
340 continue; 356 continue;
341 } 357 }
342 } 358 }
343 359
344 /* Do not remove the cast. req->sector is now a sector_t and 360 /* Do not remove the cast. blk_rq_pos(req) is now a
345 * can be 64 bits, but it will never go past 32 bits for this 361 * sector_t and can be 64 bits, but it will never go
346 * driver anyway, so we can safely cast it down and not have 362 * past 32 bits for this driver anyway, so we can
347 * to do a 64/32 division 363 * safely cast it down and not have to do a 64/32
364 * division
348 */ 365 */
349 fs->req_cyl = ((long)req->sector) / fs->secpercyl; 366 fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
350 x = ((long)req->sector) % fs->secpercyl; 367 x = ((long)blk_rq_pos(req)) % fs->secpercyl;
351 fs->head = x / fs->secpertrack; 368 fs->head = x / fs->secpertrack;
352 fs->req_sector = x % fs->secpertrack + 1; 369 fs->req_sector = x % fs->secpertrack + 1;
353 fd_req = req; 370 fd_req = req;
@@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
424 struct dbdma_cmd *cp = fs->dma_cmd; 441 struct dbdma_cmd *cp = fs->dma_cmd;
425 struct dbdma_regs __iomem *dr = fs->dma; 442 struct dbdma_regs __iomem *dr = fs->dma;
426 443
427 if (fd_req->current_nr_sectors <= 0) { 444 if (blk_rq_cur_sectors(fd_req) <= 0) {
428 printk(KERN_ERR "swim3: transfer 0 sectors?\n"); 445 printk(KERN_ERR "swim3: transfer 0 sectors?\n");
429 return; 446 return;
430 } 447 }
@@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
432 n = 1; 449 n = 1;
433 else { 450 else {
434 n = fs->secpertrack - fs->req_sector + 1; 451 n = fs->secpertrack - fs->req_sector + 1;
435 if (n > fd_req->current_nr_sectors) 452 if (n > blk_rq_cur_sectors(fd_req))
436 n = fd_req->current_nr_sectors; 453 n = blk_rq_cur_sectors(fd_req);
437 } 454 }
438 fs->scount = n; 455 fs->scount = n;
439 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0); 456 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
508 case do_transfer: 525 case do_transfer:
509 if (fs->cur_cyl != fs->req_cyl) { 526 if (fs->cur_cyl != fs->req_cyl) {
510 if (fs->retries > 5) { 527 if (fs->retries > 5) {
511 end_request(fd_req, 0); 528 swim3_end_request_cur(-EIO);
512 fs->state = idle; 529 fs->state = idle;
513 return; 530 return;
514 } 531 }
@@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
540 out_8(&sw->intr_enable, 0); 557 out_8(&sw->intr_enable, 0);
541 fs->cur_cyl = -1; 558 fs->cur_cyl = -1;
542 if (fs->retries > 5) { 559 if (fs->retries > 5) {
543 end_request(fd_req, 0); 560 swim3_end_request_cur(-EIO);
544 fs->state = idle; 561 fs->state = idle;
545 start_request(fs); 562 start_request(fs);
546 } else { 563 } else {
@@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
559 out_8(&sw->select, RELAX); 576 out_8(&sw->select, RELAX);
560 out_8(&sw->intr_enable, 0); 577 out_8(&sw->intr_enable, 0);
561 printk(KERN_ERR "swim3: seek timeout\n"); 578 printk(KERN_ERR "swim3: seek timeout\n");
562 end_request(fd_req, 0); 579 swim3_end_request_cur(-EIO);
563 fs->state = idle; 580 fs->state = idle;
564 start_request(fs); 581 start_request(fs);
565} 582}
@@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
583 return; 600 return;
584 } 601 }
585 printk(KERN_ERR "swim3: seek settle timeout\n"); 602 printk(KERN_ERR "swim3: seek settle timeout\n");
586 end_request(fd_req, 0); 603 swim3_end_request_cur(-EIO);
587 fs->state = idle; 604 fs->state = idle;
588 start_request(fs); 605 start_request(fs);
589} 606}
@@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
593 struct floppy_state *fs = (struct floppy_state *) data; 610 struct floppy_state *fs = (struct floppy_state *) data;
594 struct swim3 __iomem *sw = fs->swim3; 611 struct swim3 __iomem *sw = fs->swim3;
595 struct dbdma_regs __iomem *dr = fs->dma; 612 struct dbdma_regs __iomem *dr = fs->dma;
596 struct dbdma_cmd *cp = fs->dma_cmd;
597 unsigned long s;
598 int n; 613 int n;
599 614
600 fs->timeout_pending = 0; 615 fs->timeout_pending = 0;
@@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
605 out_8(&sw->intr_enable, 0); 620 out_8(&sw->intr_enable, 0);
606 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 621 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
607 out_8(&sw->select, RELAX); 622 out_8(&sw->select, RELAX);
608 if (rq_data_dir(fd_req) == WRITE)
609 ++cp;
610 if (ld_le16(&cp->xfer_status) != 0)
611 s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
612 else
613 s = 0;
614 fd_req->sector += s;
615 fd_req->current_nr_sectors -= s;
616 printk(KERN_ERR "swim3: timeout %sing sector %ld\n", 623 printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
617 (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); 624 (rq_data_dir(fd_req)==WRITE? "writ": "read"),
618 end_request(fd_req, 0); 625 (long)blk_rq_pos(fd_req));
626 swim3_end_request_cur(-EIO);
619 fs->state = idle; 627 fs->state = idle;
620 start_request(fs); 628 start_request(fs);
621} 629}
@@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
646 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); 654 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
647 fs->cur_cyl = -1; 655 fs->cur_cyl = -1;
648 if (fs->retries > 5) { 656 if (fs->retries > 5) {
649 end_request(fd_req, 0); 657 swim3_end_request_cur(-EIO);
650 fs->state = idle; 658 fs->state = idle;
651 start_request(fs); 659 start_request(fs);
652 } else { 660 } else {
@@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
719 if (intr & ERROR_INTR) { 727 if (intr & ERROR_INTR) {
720 n = fs->scount - 1 - resid / 512; 728 n = fs->scount - 1 - resid / 512;
721 if (n > 0) { 729 if (n > 0) {
722 fd_req->sector += n; 730 blk_update_request(fd_req, 0, n << 9);
723 fd_req->current_nr_sectors -= n;
724 fd_req->buffer += n * 512;
725 fs->req_sector += n; 731 fs->req_sector += n;
726 } 732 }
727 if (fs->retries < 5) { 733 if (fs->retries < 5) {
@@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
730 } else { 736 } else {
731 printk("swim3: error %sing block %ld (err=%x)\n", 737 printk("swim3: error %sing block %ld (err=%x)\n",
732 rq_data_dir(fd_req) == WRITE? "writ": "read", 738 rq_data_dir(fd_req) == WRITE? "writ": "read",
733 (long)fd_req->sector, err); 739 (long)blk_rq_pos(fd_req), err);
734 end_request(fd_req, 0); 740 swim3_end_request_cur(-EIO);
735 fs->state = idle; 741 fs->state = idle;
736 } 742 }
737 } else { 743 } else {
@@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
740 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); 746 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
741 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", 747 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
742 fs->state, rq_data_dir(fd_req), intr, err); 748 fs->state, rq_data_dir(fd_req), intr, err);
743 end_request(fd_req, 0); 749 swim3_end_request_cur(-EIO);
744 fs->state = idle; 750 fs->state = idle;
745 start_request(fs); 751 start_request(fs);
746 break; 752 break;
747 } 753 }
748 fd_req->sector += fs->scount; 754 if (swim3_end_request(0, fs->scount << 9)) {
749 fd_req->current_nr_sectors -= fs->scount;
750 fd_req->buffer += fs->scount * 512;
751 if (fd_req->current_nr_sectors <= 0) {
752 end_request(fd_req, 1);
753 fs->state = idle;
754 } else {
755 fs->req_sector += fs->scount; 755 fs->req_sector += fs->scount;
756 if (fs->req_sector > fs->secpertrack) { 756 if (fs->req_sector > fs->secpertrack) {
757 fs->req_sector -= fs->secpertrack; 757 fs->req_sector -= fs->secpertrack;
@@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
761 } 761 }
762 } 762 }
763 act(fs); 763 act(fs);
764 } 764 } else
765 fs->state = idle;
765 } 766 }
766 if (fs->state == idle) 767 if (fs->state == idle)
767 start_request(fs); 768 start_request(fs);
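
swim3 layers two small pieces on top of the common pattern: the swim3_end_request()/swim3_end_request_cur() wrappers around __blk_end_request() clear fd_req as soon as the request is fully done, and blk_update_request() is used in the interrupt handler's error path to account for the sectors that did transfer without completing anything. A sketch of the latter; the helper name and the 'done' parameter are hypothetical:

    #include <linux/blkdev.h>

    /* credit 'done' sectors to the request; it stays active for retry */
    static void my_advance_after_error(struct request *req, unsigned int done)
    {
        if (done)
            blk_update_request(req, 0, done << 9);
    }
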
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ff0448e4bf03..da403b6a7f43 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
749 struct request *req = crq->rq; 749 struct request *req = crq->rq;
750 int rc; 750 int rc;
751 751
752 rc = __blk_end_request(req, error, blk_rq_bytes(req)); 752 __blk_end_request_all(req, error);
753 assert(rc == 0);
754 753
755 rc = carm_put_request(host, crq); 754 rc = carm_put_request(host, crq);
756 assert(rc == 0); 755 assert(rc == 0);
@@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
811 810
812 while (1) { 811 while (1) {
813 DPRINTK("get req\n"); 812 DPRINTK("get req\n");
814 rq = elv_next_request(q); 813 rq = blk_fetch_request(q);
815 if (!rq) 814 if (!rq)
816 break; 815 break;
817 816
818 blkdev_dequeue_request(rq);
819
820 crq = rq->special; 817 crq = rq->special;
821 assert(crq != NULL); 818 assert(crq != NULL);
822 assert(crq->rq == rq); 819 assert(crq->rq == rq);
@@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
847 844
848queue_one_request: 845queue_one_request:
849 VPRINTK("get req\n"); 846 VPRINTK("get req\n");
850 rq = elv_next_request(q); 847 rq = blk_peek_request(q);
851 if (!rq) 848 if (!rq)
852 return; 849 return;
853 850
@@ -858,7 +855,7 @@ queue_one_request:
858 } 855 }
859 crq->rq = rq; 856 crq->rq = rq;
860 857
861 blkdev_dequeue_request(rq); 858 blk_start_request(rq);
862 859
863 if (rq_data_dir(rq) == WRITE) { 860 if (rq_data_dir(rq) == WRITE) {
864 writing = 1; 861 writing = 1;
@@ -904,10 +901,10 @@ queue_one_request:
904 msg->sg_count = n_elem; 901 msg->sg_count = n_elem;
905 msg->sg_type = SGT_32BIT; 902 msg->sg_type = SGT_32BIT;
906 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); 903 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
907 msg->lba = cpu_to_le32(rq->sector & 0xffffffff); 904 msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
908 tmp = (rq->sector >> 16) >> 16; 905 tmp = (blk_rq_pos(rq) >> 16) >> 16;
909 msg->lba_high = cpu_to_le16( (u16) tmp ); 906 msg->lba_high = cpu_to_le16( (u16) tmp );
910 msg->lba_count = cpu_to_le16(rq->nr_sectors); 907 msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
911 908
912 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); 909 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
913 for (i = 0; i < n_elem; i++) { 910 for (i = 0; i < n_elem; i++) {
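
sx8 keeps the two-step form because it may have to back off when it runs out of hardware tags: blk_peek_request() inspects the head of the queue without dequeueing, and blk_start_request() dequeues the request (replacing blkdev_dequeue_request()) only once the driver has committed to it. A sketch of that shape, with my_prepare() as a hypothetical per-request allocation step that can fail:

    #include <linux/blkdev.h>

    /* hypothetical per-request resource allocation; may fail */
    static int my_prepare(struct request *rq)
    {
        return 0;
    }

    static void my_request_fn(struct request_queue *q)
    {
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
            if (my_prepare(rq) < 0) {
                /* out of resources: leave rq queued and stop for now */
                blk_stop_queue(q);
                break;
            }
            blk_start_request(rq);    /* dequeue; was blkdev_dequeue_request() */
            /* ... build and issue the hardware command for rq ... */
        }
    }
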
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 689cd27ac890..cc54473b8e77 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
360static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 360static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
361 struct ub_scsi_cmd *cmd, struct ub_request *urq); 361 struct ub_scsi_cmd *cmd, struct ub_request *urq);
362static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 362static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
363static void ub_end_rq(struct request *rq, unsigned int status, 363static void ub_end_rq(struct request *rq, unsigned int status);
364 unsigned int cmd_len);
365static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 364static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
366 struct ub_request *urq, struct ub_scsi_cmd *cmd); 365 struct ub_request *urq, struct ub_scsi_cmd *cmd);
367static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 366static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
627 struct ub_lun *lun = q->queuedata; 626 struct ub_lun *lun = q->queuedata;
628 struct request *rq; 627 struct request *rq;
629 628
630 while ((rq = elv_next_request(q)) != NULL) { 629 while ((rq = blk_peek_request(q)) != NULL) {
631 if (ub_request_fn_1(lun, rq) != 0) { 630 if (ub_request_fn_1(lun, rq) != 0) {
632 blk_stop_queue(q); 631 blk_stop_queue(q);
633 break; 632 break;
@@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
643 int n_elem; 642 int n_elem;
644 643
645 if (atomic_read(&sc->poison)) { 644 if (atomic_read(&sc->poison)) {
646 blkdev_dequeue_request(rq); 645 blk_start_request(rq);
647 ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq)); 646 ub_end_rq(rq, DID_NO_CONNECT << 16);
648 return 0; 647 return 0;
649 } 648 }
650 649
651 if (lun->changed && !blk_pc_request(rq)) { 650 if (lun->changed && !blk_pc_request(rq)) {
652 blkdev_dequeue_request(rq); 651 blk_start_request(rq);
653 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq)); 652 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
654 return 0; 653 return 0;
655 } 654 }
656 655
@@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
660 return -1; 659 return -1;
661 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 660 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
662 661
663 blkdev_dequeue_request(rq); 662 blk_start_request(rq);
664 663
665 urq = &lun->urq; 664 urq = &lun->urq;
666 memset(urq, 0, sizeof(struct ub_request)); 665 memset(urq, 0, sizeof(struct ub_request));
@@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
702 701
703drop: 702drop:
704 ub_put_cmd(lun, cmd); 703 ub_put_cmd(lun, cmd);
705 ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq)); 704 ub_end_rq(rq, DID_ERROR << 16);
706 return 0; 705 return 0;
707} 706}
708 707
@@ -723,11 +722,11 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
723 /* 722 /*
724 * build the command 723 * build the command
725 * 724 *
726 * The call to blk_queue_hardsect_size() guarantees that request 725 * The call to blk_queue_logical_block_size() guarantees that request
727 * is aligned, but it is given in terms of 512 byte units, always. 726 * is aligned, but it is given in terms of 512 byte units, always.
728 */ 727 */
729 block = rq->sector >> lun->capacity.bshift; 728 block = blk_rq_pos(rq) >> lun->capacity.bshift;
730 nblks = rq->nr_sectors >> lun->capacity.bshift; 729 nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
731 730
732 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; 731 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
733 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ 732 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
739 cmd->cdb[8] = nblks; 738 cmd->cdb[8] = nblks;
740 cmd->cdb_len = 10; 739 cmd->cdb_len = 10;
741 740
742 cmd->len = rq->nr_sectors * 512; 741 cmd->len = blk_rq_bytes(rq);
743} 742}
744 743
745static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 744static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
747{ 746{
748 struct request *rq = urq->rq; 747 struct request *rq = urq->rq;
749 748
750 if (rq->data_len == 0) { 749 if (blk_rq_bytes(rq) == 0) {
751 cmd->dir = UB_DIR_NONE; 750 cmd->dir = UB_DIR_NONE;
752 } else { 751 } else {
753 if (rq_data_dir(rq) == WRITE) 752 if (rq_data_dir(rq) == WRITE)
@@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
762 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 761 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
763 cmd->cdb_len = rq->cmd_len; 762 cmd->cdb_len = rq->cmd_len;
764 763
765 cmd->len = rq->data_len; 764 cmd->len = blk_rq_bytes(rq);
766 765
767 /* 766 /*
768 * To reapply this to every URB is not as incorrect as it looks. 767 * To reapply this to every URB is not as incorrect as it looks.
@@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
777 struct ub_request *urq = cmd->back; 776 struct ub_request *urq = cmd->back;
778 struct request *rq; 777 struct request *rq;
779 unsigned int scsi_status; 778 unsigned int scsi_status;
780 unsigned int cmd_len;
781 779
782 rq = urq->rq; 780 rq = urq->rq;
783 781
784 if (cmd->error == 0) { 782 if (cmd->error == 0) {
785 if (blk_pc_request(rq)) { 783 if (blk_pc_request(rq)) {
786 if (cmd->act_len >= rq->data_len) 784 if (cmd->act_len >= rq->resid_len)
787 rq->data_len = 0; 785 rq->resid_len = 0;
788 else 786 else
789 rq->data_len -= cmd->act_len; 787 rq->resid_len -= cmd->act_len;
790 scsi_status = 0; 788 scsi_status = 0;
791 } else { 789 } else {
792 if (cmd->act_len != cmd->len) { 790 if (cmd->act_len != cmd->len) {
@@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
818 816
819 urq->rq = NULL; 817 urq->rq = NULL;
820 818
821 cmd_len = cmd->len;
822 ub_put_cmd(lun, cmd); 819 ub_put_cmd(lun, cmd);
823 ub_end_rq(rq, scsi_status, cmd_len); 820 ub_end_rq(rq, scsi_status);
824 blk_start_queue(lun->disk->queue); 821 blk_start_queue(lun->disk->queue);
825} 822}
826 823
827static void ub_end_rq(struct request *rq, unsigned int scsi_status, 824static void ub_end_rq(struct request *rq, unsigned int scsi_status)
828 unsigned int cmd_len)
829{ 825{
830 int error; 826 int error;
831 long rqlen;
832 827
833 if (scsi_status == 0) { 828 if (scsi_status == 0) {
834 error = 0; 829 error = 0;
@@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
836 error = -EIO; 831 error = -EIO;
837 rq->errors = scsi_status; 832 rq->errors = scsi_status;
838 } 833 }
839 rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */ 834 __blk_end_request_all(rq, error);
840 if (__blk_end_request(rq, error, cmd_len)) {
841 printk(KERN_WARNING DRV_NAME
842 ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
843 blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
844 }
845} 835}
846 836
847static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 837static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@@ -1759,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk)
1759 ub_revalidate(lun->udev, lun); 1749 ub_revalidate(lun->udev, lun);
1760 1750
1761 /* XXX Support sector size switching like in sr.c */ 1751 /* XXX Support sector size switching like in sr.c */
1762 blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); 1752 blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
1763 set_capacity(disk, lun->capacity.nsec); 1753 set_capacity(disk, lun->capacity.nsec);
1764 // set_disk_ro(sdkp->disk, lun->readonly); 1754 // set_disk_ro(sdkp->disk, lun->readonly);
1765 1755
@@ -2334,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
2334 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); 2324 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2335 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ 2325 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
2336 blk_queue_max_sectors(q, UB_MAX_SECTORS); 2326 blk_queue_max_sectors(q, UB_MAX_SECTORS);
2337 blk_queue_hardsect_size(q, lun->capacity.bsize); 2327 blk_queue_logical_block_size(q, lun->capacity.bsize);
2338 2328
2339 lun->disk = disk; 2329 lun->disk = disk;
2340 q->queuedata = lun; 2330 q->queuedata = lun;
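
The ub changes also show how residuals for SCSI pass-through (blk_pc_request) requests are reported now: instead of shrinking req->data_len, the driver adjusts req->resid_len, which starts out as the full byte count of the request, and then finishes with __blk_end_request_all(). A sketch of such a completion helper; act_len is a hypothetical count of bytes the hardware actually moved:

    #include <linux/blkdev.h>

    /* call with the queue lock held */
    static void my_complete_pc(struct request *rq, unsigned int act_len, int error)
    {
        if (blk_pc_request(rq)) {
            /* leave the untransferred remainder in resid_len */
            if (act_len >= rq->resid_len)
                rq->resid_len = 0;
            else
                rq->resid_len -= act_len;
        }
        __blk_end_request_all(rq, error);
    }
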
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ecccf65dce2f..390d69bb7c48 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
252 struct viodasd_device *d; 252 struct viodasd_device *d;
253 unsigned long flags; 253 unsigned long flags;
254 254
255 start = (u64)req->sector << 9; 255 start = (u64)blk_rq_pos(req) << 9;
256 256
257 if (rq_data_dir(req) == READ) { 257 if (rq_data_dir(req) == READ) {
258 direction = DMA_FROM_DEVICE; 258 direction = DMA_FROM_DEVICE;
@@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
361 * back later. 361 * back later.
362 */ 362 */
363 while (num_req_outstanding < VIOMAXREQ) { 363 while (num_req_outstanding < VIOMAXREQ) {
364 req = elv_next_request(q); 364 req = blk_fetch_request(q);
365 if (req == NULL) 365 if (req == NULL)
366 return; 366 return;
367 /* dequeue the current request from the queue */
368 blkdev_dequeue_request(req);
369 /* check that request contains a valid command */ 367 /* check that request contains a valid command */
370 if (!blk_fs_request(req)) { 368 if (!blk_fs_request(req)) {
371 viodasd_end_request(req, -EIO, req->hard_nr_sectors); 369 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
372 continue; 370 continue;
373 } 371 }
374 /* Try sending the request */ 372 /* Try sending the request */
375 if (send_request(req) != 0) 373 if (send_request(req) != 0)
376 viodasd_end_request(req, -EIO, req->hard_nr_sectors); 374 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
377 } 375 }
378} 376}
379 377
@@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
590 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result); 588 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
591 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n", 589 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
592 event->xRc, bevent->sub_result, err->msg); 590 event->xRc, bevent->sub_result, err->msg);
593 num_sect = req->hard_nr_sectors; 591 num_sect = blk_rq_sectors(req);
594 } 592 }
595 qlock = req->q->queue_lock; 593 qlock = req->q->queue_lock;
596 spin_lock_irqsave(qlock, irq_flags); 594 spin_lock_irqsave(qlock, irq_flags);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5d34764c8a87..c0facaa55cf4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,7 @@ struct virtblk_req
37 struct list_head list; 37 struct list_head list;
38 struct request *req; 38 struct request *req;
39 struct virtio_blk_outhdr out_hdr; 39 struct virtio_blk_outhdr out_hdr;
40 struct virtio_scsi_inhdr in_hdr;
40 u8 status; 41 u8 status;
41}; 42};
42 43
@@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
50 spin_lock_irqsave(&vblk->lock, flags); 51 spin_lock_irqsave(&vblk->lock, flags);
51 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { 52 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
52 int error; 53 int error;
54
53 switch (vbr->status) { 55 switch (vbr->status) {
54 case VIRTIO_BLK_S_OK: 56 case VIRTIO_BLK_S_OK:
55 error = 0; 57 error = 0;
@@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
62 break; 64 break;
63 } 65 }
64 66
65 __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req)); 67 if (blk_pc_request(vbr->req)) {
68 vbr->req->resid_len = vbr->in_hdr.residual;
69 vbr->req->sense_len = vbr->in_hdr.sense_len;
70 vbr->req->errors = vbr->in_hdr.errors;
71 }
72
73 __blk_end_request_all(vbr->req, error);
66 list_del(&vbr->list); 74 list_del(&vbr->list);
67 mempool_free(vbr, vblk->pool); 75 mempool_free(vbr, vblk->pool);
68 } 76 }
@@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
74static bool do_req(struct request_queue *q, struct virtio_blk *vblk, 82static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
75 struct request *req) 83 struct request *req)
76{ 84{
77 unsigned long num, out, in; 85 unsigned long num, out = 0, in = 0;
78 struct virtblk_req *vbr; 86 struct virtblk_req *vbr;
79 87
80 vbr = mempool_alloc(vblk->pool, GFP_ATOMIC); 88 vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
85 vbr->req = req; 93 vbr->req = req;
86 if (blk_fs_request(vbr->req)) { 94 if (blk_fs_request(vbr->req)) {
87 vbr->out_hdr.type = 0; 95 vbr->out_hdr.type = 0;
88 vbr->out_hdr.sector = vbr->req->sector; 96 vbr->out_hdr.sector = blk_rq_pos(vbr->req);
89 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 97 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
90 } else if (blk_pc_request(vbr->req)) { 98 } else if (blk_pc_request(vbr->req)) {
91 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 99 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
99 if (blk_barrier_rq(vbr->req)) 107 if (blk_barrier_rq(vbr->req))
100 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; 108 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
101 109
102 sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); 110 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
103 num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
104 sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
105 111
106 if (rq_data_dir(vbr->req) == WRITE) { 112 /*
107 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; 113 * If this is a packet command we need a couple of additional headers.
108 out = 1 + num; 114 * Behind the normal outhdr we put a segment with the scsi command
109 in = 1; 115 * block, and in front of the normal inhdr we put the sense data
110 } else { 116 * followed by the inhdr with additional status information.
111 vbr->out_hdr.type |= VIRTIO_BLK_T_IN; 117 */
112 out = 1; 118 if (blk_pc_request(vbr->req))
113 in = 1 + num; 119 sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
120
121 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
122
123 if (blk_pc_request(vbr->req)) {
124 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
125 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
126 sizeof(vbr->in_hdr));
127 }
128
129 sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
130 sizeof(vbr->status));
131
132 if (num) {
133 if (rq_data_dir(vbr->req) == WRITE) {
134 vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
135 out += num;
136 } else {
137 vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
138 in += num;
139 }
114 } 140 }
115 141
116 if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) { 142 if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
@@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
124 150
125static void do_virtblk_request(struct request_queue *q) 151static void do_virtblk_request(struct request_queue *q)
126{ 152{
127 struct virtio_blk *vblk = NULL; 153 struct virtio_blk *vblk = q->queuedata;
128 struct request *req; 154 struct request *req;
129 unsigned int issued = 0; 155 unsigned int issued = 0;
130 156
131 while ((req = elv_next_request(q)) != NULL) { 157 while ((req = blk_peek_request(q)) != NULL) {
132 vblk = req->rq_disk->private_data;
133 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); 158 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
134 159
135 /* If this request fails, stop queue and wait for something to 160 /* If this request fails, stop queue and wait for something to
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
138 blk_stop_queue(q); 163 blk_stop_queue(q);
139 break; 164 break;
140 } 165 }
141 blkdev_dequeue_request(req); 166 blk_start_request(req);
142 issued++; 167 issued++;
143 } 168 }
144 169
@@ -146,12 +171,51 @@ static void do_virtblk_request(struct request_queue *q)
146 vblk->vq->vq_ops->kick(vblk->vq); 171 vblk->vq->vq_ops->kick(vblk->vq);
147} 172}
148 173
174/* return ATA identify data
175 */
176static int virtblk_identify(struct gendisk *disk, void *argp)
177{
178 struct virtio_blk *vblk = disk->private_data;
179 void *opaque;
180 int err = -ENOMEM;
181
182 opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
183 if (!opaque)
184 goto out;
185
186 err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
187 offsetof(struct virtio_blk_config, identify), opaque,
188 VIRTIO_BLK_ID_BYTES);
189
190 if (err)
191 goto out_kfree;
192
193 if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
194 err = -EFAULT;
195
196out_kfree:
197 kfree(opaque);
198out:
199 return err;
200}
201
149static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, 202static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
150 unsigned cmd, unsigned long data) 203 unsigned cmd, unsigned long data)
151{ 204{
152 return scsi_cmd_ioctl(bdev->bd_disk->queue, 205 struct gendisk *disk = bdev->bd_disk;
153 bdev->bd_disk, mode, cmd, 206 struct virtio_blk *vblk = disk->private_data;
154 (void __user *)data); 207 void __user *argp = (void __user *)data;
208
209 if (cmd == HDIO_GET_IDENTITY)
210 return virtblk_identify(disk, argp);
211
212 /*
213 * Only allow the generic SCSI ioctls if the host can support it.
214 */
215 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
216 return -ENOIOCTLCMD;
217
218 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
155} 219}
156 220
157/* We provide getgeo only to please some old bootloader/partitioning tools */ 221/* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -249,6 +313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
249 goto out_put_disk; 313 goto out_put_disk;
250 } 314 }
251 315
316 vblk->disk->queue->queuedata = vblk;
252 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue); 317 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
253 318
254 if (index < 26) { 319 if (index < 26) {
@@ -313,7 +378,7 @@ static int virtblk_probe(struct virtio_device *vdev)
313 offsetof(struct virtio_blk_config, blk_size), 378 offsetof(struct virtio_blk_config, blk_size),
314 &blk_size); 379 &blk_size);
315 if (!err) 380 if (!err)
316 blk_queue_hardsect_size(vblk->disk->queue, blk_size); 381 blk_queue_logical_block_size(vblk->disk->queue, blk_size);
317 382
318 add_disk(vblk->disk); 383 add_disk(vblk->disk);
319 return 0; 384 return 0;
@@ -356,6 +421,7 @@ static struct virtio_device_id id_table[] = {
356static unsigned int features[] = { 421static unsigned int features[] = {
357 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, 422 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
358 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 423 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
424 VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
359}; 425};
360 426
361static struct virtio_driver virtio_blk = { 427static struct virtio_driver virtio_blk = {
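
The virtio_blk hunks above (like the xen-blkfront ones further down) replace the old elv_next_request()/blkdev_dequeue_request() pair with blk_peek_request()/blk_start_request(). The following is an illustrative sketch of that idiom only, not part of the patch; struct my_dev, my_ring_full() and my_submit() are hypothetical names standing in for driver-specific state and helpers.

#include <linux/blkdev.h>

struct my_dev;					/* hypothetical driver state */
extern bool my_ring_full(struct my_dev *dev);
extern int my_submit(struct my_dev *dev, struct request *req);

static void my_request_fn(struct request_queue *q)
{
	struct my_dev *dev = q->queuedata;
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (my_ring_full(dev)) {
			/* leave req queued; restart once space frees up */
			blk_stop_queue(q);
			break;
		}

		blk_start_request(req);		/* dequeue: the driver owns req now */

		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		if (my_submit(dev, req)) {
			/* submission failed after all: hand req back */
			blk_requeue_request(q, req);
			blk_stop_queue(q);
			break;
		}
	}
}

Peeking before starting lets the driver leave a request on the queue (ring full) without any requeue dance, which is why the conversions above move blk_start_request() past the capacity check.
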
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 64b496fce98b..ce2429219925 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
305 if (xdc_busy) 305 if (xdc_busy)
306 return; 306 return;
307 307
308 while ((req = elv_next_request(q)) != NULL) { 308 req = blk_fetch_request(q);
309 unsigned block = req->sector; 309 while (req) {
310 unsigned count = req->nr_sectors; 310 unsigned block = blk_rq_pos(req);
311 int rw = rq_data_dir(req); 311 unsigned count = blk_rq_cur_sectors(req);
312 XD_INFO *disk = req->rq_disk->private_data; 312 XD_INFO *disk = req->rq_disk->private_data;
313 int res = 0; 313 int res = -EIO;
314 int retry; 314 int retry;
315 315
316 if (!blk_fs_request(req)) { 316 if (!blk_fs_request(req))
317 end_request(req, 0); 317 goto done;
318 continue; 318 if (block + count > get_capacity(req->rq_disk))
319 } 319 goto done;
320 if (block + count > get_capacity(req->rq_disk)) {
321 end_request(req, 0);
322 continue;
323 }
324 if (rw != READ && rw != WRITE) {
325 printk("do_xd_request: unknown request\n");
326 end_request(req, 0);
327 continue;
328 }
329 for (retry = 0; (retry < XD_RETRIES) && !res; retry++) 320 for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
330 res = xd_readwrite(rw, disk, req->buffer, block, count); 321 res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
331 end_request(req, res); /* wrap up, 0 = fail, 1 = success */ 322 block, count);
323 done:
324 /* wrap up, 0 = success, -errno = fail */
325 if (!__blk_end_request_cur(req, res))
326 req = blk_fetch_request(q);
332 } 327 }
333} 328}
334 329
@@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
418 printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); 413 printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
419 xd_recalibrate(drive); 414 xd_recalibrate(drive);
420 spin_lock_irq(&xd_lock); 415 spin_lock_irq(&xd_lock);
421 return (0); 416 return -EIO;
422 case 2: 417 case 2:
423 if (sense[0] & 0x30) { 418 if (sense[0] & 0x30) {
424 printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing")); 419 printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
439 else 434 else
440 printk(" - no valid disk address\n"); 435 printk(" - no valid disk address\n");
441 spin_lock_irq(&xd_lock); 436 spin_lock_irq(&xd_lock);
442 return (0); 437 return -EIO;
443 } 438 }
444 if (xd_dma_buffer) 439 if (xd_dma_buffer)
445 for (i=0; i < (temp * 0x200); i++) 440 for (i=0; i < (temp * 0x200); i++)
@@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
448 count -= temp, buffer += temp * 0x200, block += temp; 443 count -= temp, buffer += temp * 0x200, block += temp;
449 } 444 }
450 spin_lock_irq(&xd_lock); 445 spin_lock_irq(&xd_lock);
451 return (1); 446 return 0;
452} 447}
453 448
454/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ 449/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
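
The xd.c conversion above also flips the completion convention: end_request() took 1 for success and 0 for failure, while __blk_end_request_cur() takes 0 or a negative errno and returns true while part of the request is still pending. A hedged sketch of the resulting loop shape, where handle_chunk() is a hypothetical helper:

#include <linux/blkdev.h>

extern int handle_chunk(char *buf, sector_t pos, unsigned int sectors);

static void my_request_fn(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);	/* peek + start in one call */

	while (req) {
		int err;

		if (!blk_fs_request(req))
			err = -EIO;
		else
			err = handle_chunk(req->buffer, blk_rq_pos(req),
					   blk_rq_cur_sectors(req));

		/* returns false once the whole request has been completed */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
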
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a6cbf7b808e6..c1996829d5ec 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
122static int get_id_from_freelist(struct blkfront_info *info) 122static int get_id_from_freelist(struct blkfront_info *info)
123{ 123{
124 unsigned long free = info->shadow_free; 124 unsigned long free = info->shadow_free;
125 BUG_ON(free > BLK_RING_SIZE); 125 BUG_ON(free >= BLK_RING_SIZE);
126 info->shadow_free = info->shadow[free].req.id; 126 info->shadow_free = info->shadow[free].req.id;
127 info->shadow[free].req.id = 0x0fffffee; /* debug */ 127 info->shadow[free].req.id = 0x0fffffee; /* debug */
128 return free; 128 return free;
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
231 info->shadow[id].request = (unsigned long)req; 231 info->shadow[id].request = (unsigned long)req;
232 232
233 ring_req->id = id; 233 ring_req->id = id;
234 ring_req->sector_number = (blkif_sector_t)req->sector; 234 ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
235 ring_req->handle = info->handle; 235 ring_req->handle = info->handle;
236 236
237 ring_req->operation = rq_data_dir(req) ? 237 ring_req->operation = rq_data_dir(req) ?
@@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
299 299
300 queued = 0; 300 queued = 0;
301 301
302 while ((req = elv_next_request(rq)) != NULL) { 302 while ((req = blk_peek_request(rq)) != NULL) {
303 info = req->rq_disk->private_data; 303 info = req->rq_disk->private_data;
304 if (!blk_fs_request(req)) {
305 end_request(req, 0);
306 continue;
307 }
308 304
309 if (RING_FULL(&info->ring)) 305 if (RING_FULL(&info->ring))
310 goto wait; 306 goto wait;
311 307
312 pr_debug("do_blk_req %p: cmd %p, sec %lx, " 308 blk_start_request(req);
313 "(%u/%li) buffer:%p [%s]\n",
314 req, req->cmd, (unsigned long)req->sector,
315 req->current_nr_sectors,
316 req->nr_sectors, req->buffer,
317 rq_data_dir(req) ? "write" : "read");
318 309
310 if (!blk_fs_request(req)) {
311 __blk_end_request_all(req, -EIO);
312 continue;
313 }
314
315 pr_debug("do_blk_req %p: cmd %p, sec %lx, "
316 "(%u/%u) buffer:%p [%s]\n",
317 req, req->cmd, (unsigned long)blk_rq_pos(req),
318 blk_rq_cur_sectors(req), blk_rq_sectors(req),
319 req->buffer, rq_data_dir(req) ? "write" : "read");
319 320
320 blkdev_dequeue_request(req);
321 if (blkif_queue_request(req)) { 321 if (blkif_queue_request(req)) {
322 blk_requeue_request(rq, req); 322 blk_requeue_request(rq, req);
323wait: 323wait:
@@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
344 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); 344 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
345 345
346 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 346 /* Hard sector size and max sectors impersonate the equiv. hardware. */
347 blk_queue_hardsect_size(rq, sector_size); 347 blk_queue_logical_block_size(rq, sector_size);
348 blk_queue_max_sectors(rq, 512); 348 blk_queue_max_sectors(rq, 512);
349 349
350 /* Each segment in a request is up to an aligned page in size. */ 350 /* Each segment in a request is up to an aligned page in size. */
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
551 551
552 for (i = info->ring.rsp_cons; i != rp; i++) { 552 for (i = info->ring.rsp_cons; i != rp; i++) {
553 unsigned long id; 553 unsigned long id;
554 int ret;
555 554
556 bret = RING_GET_RESPONSE(&info->ring, i); 555 bret = RING_GET_RESPONSE(&info->ring, i);
557 id = bret->id; 556 id = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
578 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " 577 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
579 "request: %x\n", bret->status); 578 "request: %x\n", bret->status);
580 579
581 ret = __blk_end_request(req, error, blk_rq_bytes(req)); 580 __blk_end_request_all(req, error);
582 BUG_ON(ret);
583 break; 581 break;
584 default: 582 default:
585 BUG(); 583 BUG();
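
In the interrupt-handler hunk above, the three-argument __blk_end_request(req, error, blk_rq_bytes(req)) call, whose return value had to be asserted, collapses into __blk_end_request_all(). A minimal sketch of the equivalence when the whole request is being finished:

#include <linux/blkdev.h>

static void finish_whole_request(struct request *req, int error)
{
	/* old: complete blk_rq_bytes(req) bytes and assert nothing is left */
	/* BUG_ON(__blk_end_request(req, error, blk_rq_bytes(req))); */

	/* new: same effect, no return value to check */
	__blk_end_request_all(req, error);
}
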
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4aecf5dc6a93..f08491a3a813 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
463{ 463{
464 struct request *req; 464 struct request *req;
465 465
466 while ((req = elv_next_request(q)) != NULL) { 466 while ((req = blk_peek_request(q)) != NULL) {
467 if (blk_fs_request(req)) 467 if (blk_fs_request(req))
468 break; 468 break;
469 end_request(req, 0); 469 blk_start_request(req);
470 __blk_end_request_all(req, -EIO);
470 } 471 }
471 return req; 472 return req;
472} 473}
@@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
492 set_capacity(ace->gd, 0); 493 set_capacity(ace->gd, 0);
493 dev_info(ace->dev, "No CF in slot\n"); 494 dev_info(ace->dev, "No CF in slot\n");
494 495
495 /* Drop all pending requests */ 496 /* Drop all in-flight and pending requests */
496 while ((req = elv_next_request(ace->queue)) != NULL) 497 if (ace->req) {
497 end_request(req, 0); 498 __blk_end_request_all(ace->req, -EIO);
499 ace->req = NULL;
500 }
501 while ((req = blk_fetch_request(ace->queue)) != NULL)
502 __blk_end_request_all(req, -EIO);
498 503
499 /* Drop back to IDLE state and notify waiters */ 504 /* Drop back to IDLE state and notify waiters */
500 ace->fsm_state = ACE_FSM_STATE_IDLE; 505 ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
642 ace->fsm_state = ACE_FSM_STATE_IDLE; 647 ace->fsm_state = ACE_FSM_STATE_IDLE;
643 break; 648 break;
644 } 649 }
650 blk_start_request(req);
645 651
646 /* Okay, it's a data request, set it up for transfer */ 652 /* Okay, it's a data request, set it up for transfer */
647 dev_dbg(ace->dev, 653 dev_dbg(ace->dev,
648 "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n", 654 "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
649 (unsigned long long) req->sector, req->hard_nr_sectors, 655 (unsigned long long)blk_rq_pos(req),
650 req->current_nr_sectors, rq_data_dir(req)); 656 blk_rq_sectors(req), blk_rq_cur_sectors(req),
657 rq_data_dir(req));
651 658
652 ace->req = req; 659 ace->req = req;
653 ace->data_ptr = req->buffer; 660 ace->data_ptr = req->buffer;
654 ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR; 661 ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
655 ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF); 662 ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
656 663
657 count = req->hard_nr_sectors; 664 count = blk_rq_sectors(req);
658 if (rq_data_dir(req)) { 665 if (rq_data_dir(req)) {
659 /* Kick off write request */ 666 /* Kick off write request */
660 dev_dbg(ace->dev, "write data\n"); 667 dev_dbg(ace->dev, "write data\n");
@@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
688 dev_dbg(ace->dev, 695 dev_dbg(ace->dev,
689 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", 696 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
690 ace->fsm_task, ace->fsm_iter_num, 697 ace->fsm_task, ace->fsm_iter_num,
691 ace->req->current_nr_sectors * 16, 698 blk_rq_cur_sectors(ace->req) * 16,
692 ace->data_count, ace->in_irq); 699 ace->data_count, ace->in_irq);
693 ace_fsm_yield(ace); /* need to poll CFBSY bit */ 700 ace_fsm_yield(ace); /* need to poll CFBSY bit */
694 break; 701 break;
@@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
697 dev_dbg(ace->dev, 704 dev_dbg(ace->dev,
698 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", 705 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
699 ace->fsm_task, ace->fsm_iter_num, 706 ace->fsm_task, ace->fsm_iter_num,
700 ace->req->current_nr_sectors * 16, 707 blk_rq_cur_sectors(ace->req) * 16,
701 ace->data_count, ace->in_irq); 708 ace->data_count, ace->in_irq);
702 ace_fsm_yieldirq(ace); 709 ace_fsm_yieldirq(ace);
703 break; 710 break;
@@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
717 } 724 }
718 725
719 /* bio finished; is there another one? */ 726 /* bio finished; is there another one? */
720 if (__blk_end_request(ace->req, 0, 727 if (__blk_end_request_cur(ace->req, 0)) {
721 blk_rq_cur_bytes(ace->req))) { 728 /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
722 /* dev_dbg(ace->dev, "next block; h=%li c=%i\n", 729 * blk_rq_sectors(ace->req),
723 * ace->req->hard_nr_sectors, 730 * blk_rq_cur_sectors(ace->req));
724 * ace->req->current_nr_sectors);
725 */ 731 */
726 ace->data_ptr = ace->req->buffer; 732 ace->data_ptr = ace->req->buffer;
727 ace->data_count = ace->req->current_nr_sectors * 16; 733 ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
728 ace_fsm_yieldirq(ace); 734 ace_fsm_yieldirq(ace);
729 break; 735 break;
730 } 736 }
@@ -978,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace)
978 ace->queue = blk_init_queue(ace_request, &ace->lock); 984 ace->queue = blk_init_queue(ace_request, &ace->lock);
979 if (ace->queue == NULL) 985 if (ace->queue == NULL)
980 goto err_blk_initq; 986 goto err_blk_initq;
981 blk_queue_hardsect_size(ace->queue, 512); 987 blk_queue_logical_block_size(ace->queue, 512);
982 988
983 /* 989 /*
984 * Allocate and initialize GD structure 990 * Allocate and initialize GD structure
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 80754cdd3119..4575171e5beb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
70static void do_z2_request(struct request_queue *q) 70static void do_z2_request(struct request_queue *q)
71{ 71{
72 struct request *req; 72 struct request *req;
73 while ((req = elv_next_request(q)) != NULL) { 73
74 unsigned long start = req->sector << 9; 74 req = blk_fetch_request(q);
75 unsigned long len = req->current_nr_sectors << 9; 75 while (req) {
76 unsigned long start = blk_rq_pos(req) << 9;
77 unsigned long len = blk_rq_cur_bytes(req);
78 int err = 0;
76 79
77 if (start + len > z2ram_size) { 80 if (start + len > z2ram_size) {
78 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", 81 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
79 req->sector, req->current_nr_sectors); 82 blk_rq_pos(req), blk_rq_cur_sectors(req));
80 end_request(req, 0); 83 err = -EIO;
81 continue; 84 goto done;
82 } 85 }
83 while (len) { 86 while (len) {
84 unsigned long addr = start & Z2RAM_CHUNKMASK; 87 unsigned long addr = start & Z2RAM_CHUNKMASK;
@@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
93 start += size; 96 start += size;
94 len -= size; 97 len -= size;
95 } 98 }
96 end_request(req, 1); 99 done:
100 if (!__blk_end_request_cur(req, err))
101 req = blk_fetch_request(q);
97 } 102 }
98} 103}
99 104
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index cceace61ef28..71d1b9bab70b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2101 nr = nframes; 2101 nr = nframes;
2102 if (cdi->cdda_method == CDDA_BPC_SINGLE) 2102 if (cdi->cdda_method == CDDA_BPC_SINGLE)
2103 nr = 1; 2103 nr = 1;
2104 if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9)) 2104 if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
2105 nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW; 2105 nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
2106 2106
2107 len = nr * CD_FRAMESIZE_RAW; 2107 len = nr * CD_FRAMESIZE_RAW;
2108 2108
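
As in the cdrom.c hunk above, queue limits are now read through accessors rather than raw request_queue fields. A small sketch of the new spelling (the helper name is made up for illustration):

#include <linux/blkdev.h>
#include <linux/cdrom.h>

static unsigned int cdda_frames_per_read(struct request_queue *q)
{
	unsigned int max_bytes = queue_max_sectors(q) << 9;	/* was q->max_sectors << 9 */

	return max_bytes / CD_FRAMESIZE_RAW;
}
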
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 2eecb779437b..b5621f27c4be 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
584 list_for_each_safe(elem, next, &gdrom_deferred) { 584 list_for_each_safe(elem, next, &gdrom_deferred) {
585 req = list_entry(elem, struct request, queuelist); 585 req = list_entry(elem, struct request, queuelist);
586 spin_unlock(&gdrom_lock); 586 spin_unlock(&gdrom_lock);
587 block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET; 587 block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
588 block_cnt = req->nr_sectors/GD_TO_BLK; 588 block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG); 589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); 590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG); 591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
632 * before handling ending the request */ 632 * before handling ending the request */
633 spin_lock(&gdrom_lock); 633 spin_lock(&gdrom_lock);
634 list_del_init(&req->queuelist); 634 list_del_init(&req->queuelist);
635 __blk_end_request(req, err, blk_rq_bytes(req)); 635 __blk_end_request_all(req, err);
636 } 636 }
637 spin_unlock(&gdrom_lock); 637 spin_unlock(&gdrom_lock);
638 kfree(read_command); 638 kfree(read_command);
639} 639}
640 640
641static void gdrom_request_handler_dma(struct request *req)
642{
643 /* dequeue, add to list of deferred work
644 * and then schedule workqueue */
645 blkdev_dequeue_request(req);
646 list_add_tail(&req->queuelist, &gdrom_deferred);
647 schedule_work(&work);
648}
649
650static void gdrom_request(struct request_queue *rq) 641static void gdrom_request(struct request_queue *rq)
651{ 642{
652 struct request *req; 643 struct request *req;
653 644
654 while ((req = elv_next_request(rq)) != NULL) { 645 while ((req = blk_fetch_request(rq)) != NULL) {
655 if (!blk_fs_request(req)) { 646 if (!blk_fs_request(req)) {
656 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); 647 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
657 end_request(req, 0); 648 __blk_end_request_all(req, -EIO);
649 continue;
658 } 650 }
659 if (rq_data_dir(req) != READ) { 651 if (rq_data_dir(req) != READ) {
660 printk(KERN_NOTICE "GDROM: Read only device -"); 652 printk(KERN_NOTICE "GDROM: Read only device -");
661 printk(" write request ignored\n"); 653 printk(" write request ignored\n");
662 end_request(req, 0); 654 __blk_end_request_all(req, -EIO);
655 continue;
663 } 656 }
664 if (req->nr_sectors) 657
665 gdrom_request_handler_dma(req); 658 /*
666 else 659 * Add to list of deferred work and then schedule
667 end_request(req, 0); 660 * workqueue.
661 */
662 list_add_tail(&req->queuelist, &gdrom_deferred);
663 schedule_work(&work);
668 } 664 }
669} 665}
670 666
@@ -743,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void)
743 739
744static int __devinit probe_gdrom_setupqueue(void) 740static int __devinit probe_gdrom_setupqueue(void)
745{ 741{
746 blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR); 742 blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
747 /* using DMA so memory will need to be contiguous */ 743 /* using DMA so memory will need to be contiguous */
748 blk_queue_max_hw_segments(gd.gdrom_rq, 1); 744 blk_queue_max_hw_segments(gd.gdrom_rq, 1);
749 /* set a large max size to get most from DMA */ 745 /* set a large max size to get most from DMA */
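
The gdrom_request() rewrite above folds the old gdrom_request_handler_dma() helper into the main loop: because blk_fetch_request() has already dequeued the request, it can be linked straight onto a private list for the workqueue. A sketch of that pattern, with my_lock, my_deferred and my_work as hypothetical stand-ins:

#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_deferred);
static struct work_struct my_work;

static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (!blk_fs_request(req) || rq_data_dir(req) != READ) {
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/* req is already off the queue; park it for the worker */
		spin_lock(&my_lock);
		list_add_tail(&req->queuelist, &my_deferred);
		spin_unlock(&my_lock);
		schedule_work(&my_work);
	}
}
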
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 9b1624e0ddeb..0fff646cc2f0 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
282 viopath_targetinst(viopath_hostLp), 282 viopath_targetinst(viopath_hostLp),
283 (u64)req, VIOVERSION << 16, 283 (u64)req, VIOVERSION << 16,
284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr, 284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
285 (u64)req->sector * 512, len, 0); 285 (u64)blk_rq_pos(req) * 512, len, 0);
286 if (hvrc != HvLpEvent_Rc_Good) { 286 if (hvrc != HvLpEvent_Rc_Good) {
287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc); 287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
288 return -1; 288 return -1;
@@ -291,36 +291,19 @@ static int send_request(struct request *req)
291 return 0; 291 return 0;
292} 292}
293 293
294static void viocd_end_request(struct request *req, int error)
295{
296 int nsectors = req->hard_nr_sectors;
297
298 /*
299 * Make sure it's fully ended, and ensure that we process
300 * at least one sector.
301 */
302 if (blk_pc_request(req))
303 nsectors = (req->data_len + 511) >> 9;
304 if (!nsectors)
305 nsectors = 1;
306
307 if (__blk_end_request(req, error, nsectors << 9))
308 BUG();
309}
310
311static int rwreq; 294static int rwreq;
312 295
313static void do_viocd_request(struct request_queue *q) 296static void do_viocd_request(struct request_queue *q)
314{ 297{
315 struct request *req; 298 struct request *req;
316 299
317 while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { 300 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
318 if (!blk_fs_request(req)) 301 if (!blk_fs_request(req))
319 viocd_end_request(req, -EIO); 302 __blk_end_request_all(req, -EIO);
320 else if (send_request(req) < 0) { 303 else if (send_request(req) < 0) {
321 printk(VIOCD_KERN_WARNING 304 printk(VIOCD_KERN_WARNING
322 "unable to send message to OS/400!"); 305 "unable to send message to OS/400!");
323 viocd_end_request(req, -EIO); 306 __blk_end_request_all(req, -EIO);
324 } else 307 } else
325 rwreq++; 308 rwreq++;
326 } 309 }
@@ -486,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
486 case viocdopen: 469 case viocdopen:
487 if (event->xRc == 0) { 470 if (event->xRc == 0) {
488 di = &viocd_diskinfo[bevent->disk]; 471 di = &viocd_diskinfo[bevent->disk];
489 blk_queue_hardsect_size(di->viocd_disk->queue, 472 blk_queue_logical_block_size(di->viocd_disk->queue,
490 bevent->block_size); 473 bevent->block_size);
491 set_capacity(di->viocd_disk, 474 set_capacity(di->viocd_disk,
492 bevent->media_size * 475 bevent->media_size *
493 bevent->block_size / 512); 476 bevent->block_size / 512);
@@ -531,9 +514,9 @@ return_complete:
531 "with rc %d:0x%04X: %s\n", 514 "with rc %d:0x%04X: %s\n",
532 req, event->xRc, 515 req, event->xRc,
533 bevent->sub_result, err->msg); 516 bevent->sub_result, err->msg);
534 viocd_end_request(req, -EIO); 517 __blk_end_request_all(req, -EIO);
535 } else 518 } else
536 viocd_end_request(req, 0); 519 __blk_end_request_all(req, 0);
537 520
538 /* restart handling of incoming requests */ 521 /* restart handling of incoming requests */
539 spin_unlock_irqrestore(&viocd_reqlock, flags); 522 spin_unlock_irqrestore(&viocd_reqlock, flags);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 20d90e6a6e50..db32f0e4c7dd 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp)
71 err = bd_claim(bdev, raw_open); 71 err = bd_claim(bdev, raw_open);
72 if (err) 72 if (err)
73 goto out1; 73 goto out1;
74 err = set_blocksize(bdev, bdev_hardsect_size(bdev)); 74 err = set_blocksize(bdev, bdev_logical_block_size(bdev));
75 if (err) 75 if (err)
76 goto out2; 76 goto out2;
77 filp->f_flags |= O_DIRECT; 77 filp->f_flags |= O_DIRECT;
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index afe5a4323879..757e5956b132 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -246,6 +246,7 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
246 */ 246 */
247void ide_retry_pc(ide_drive_t *drive) 247void ide_retry_pc(ide_drive_t *drive)
248{ 248{
249 struct request *failed_rq = drive->hwif->rq;
249 struct request *sense_rq = &drive->sense_rq; 250 struct request *sense_rq = &drive->sense_rq;
250 struct ide_atapi_pc *pc = &drive->request_sense_pc; 251 struct ide_atapi_pc *pc = &drive->request_sense_pc;
251 252
@@ -255,13 +256,22 @@ void ide_retry_pc(ide_drive_t *drive)
255 ide_init_pc(pc); 256 ide_init_pc(pc);
256 memcpy(pc->c, sense_rq->cmd, 12); 257 memcpy(pc->c, sense_rq->cmd, 12);
257 pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */ 258 pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */
258 pc->req_xfer = sense_rq->data_len; 259 pc->req_xfer = blk_rq_bytes(sense_rq);
259 260
260 if (drive->media == ide_tape) 261 if (drive->media == ide_tape)
261 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 262 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
262 263
263 if (ide_queue_sense_rq(drive, pc)) 264 /*
264 ide_complete_rq(drive, -EIO, blk_rq_bytes(drive->hwif->rq)); 265 * Push back the failed request and put request sense on top
266 * of it. The failed command will be retried after sense data
267 * is acquired.
268 */
269 blk_requeue_request(failed_rq->q, failed_rq);
270 drive->hwif->rq = NULL;
271 if (ide_queue_sense_rq(drive, pc)) {
272 blk_start_request(failed_rq);
273 ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
274 }
265} 275}
266EXPORT_SYMBOL_GPL(ide_retry_pc); 276EXPORT_SYMBOL_GPL(ide_retry_pc);
267 277
@@ -303,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
303 return 32768; 313 return 32768;
304 else if (blk_sense_request(rq) || blk_pc_request(rq) || 314 else if (blk_sense_request(rq) || blk_pc_request(rq) ||
305 rq->cmd_type == REQ_TYPE_ATA_PC) 315 rq->cmd_type == REQ_TYPE_ATA_PC)
306 return rq->data_len; 316 return blk_rq_bytes(rq);
307 else 317 else
308 return 0; 318 return 0;
309} 319}
@@ -367,7 +377,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
367 /* No more interrupts */ 377 /* No more interrupts */
368 if ((stat & ATA_DRQ) == 0) { 378 if ((stat & ATA_DRQ) == 0) {
369 int uptodate, error; 379 int uptodate, error;
370 unsigned int done;
371 380
372 debug_log("Packet command completed, %d bytes transferred\n", 381 debug_log("Packet command completed, %d bytes transferred\n",
373 pc->xferred); 382 pc->xferred);
@@ -431,7 +440,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
431 error = uptodate ? 0 : -EIO; 440 error = uptodate ? 0 : -EIO;
432 } 441 }
433 442
434 ide_complete_rq(drive, error, done); 443 ide_complete_rq(drive, error, blk_rq_bytes(rq));
435 return ide_stopped; 444 return ide_stopped;
436 } 445 }
437 446
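
The ide_retry_pc() change above makes the requeue of the failed request explicit before the sense request is queued, since the in-flight request is no longer sitting on the queue after blk_start_request(). As a generic sketch, assuming the caller does not already hold the queue lock:

#include <linux/blkdev.h>

static void give_back_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);	/* rq returns to the head of the queue */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
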
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index a75e4ee1cd17..424140c6c400 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
182 (sense->information[2] << 8) | 182 (sense->information[2] << 8) |
183 (sense->information[3]); 183 (sense->information[3]);
184 184
185 if (drive->queue->hardsect_size == 2048) 185 if (queue_logical_block_size(drive->queue) == 2048)
186 /* device sector size is 2K */ 186 /* device sector size is 2K */
187 sector <<= 2; 187 sector <<= 2;
188 188
@@ -404,15 +404,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
404 404
405end_request: 405end_request:
406 if (stat & ATA_ERR) { 406 if (stat & ATA_ERR) {
407 struct request_queue *q = drive->queue;
408 unsigned long flags;
409
410 spin_lock_irqsave(q->queue_lock, flags);
411 blkdev_dequeue_request(rq);
412 spin_unlock_irqrestore(q->queue_lock, flags);
413
414 hwif->rq = NULL; 407 hwif->rq = NULL;
415
416 return ide_queue_sense_rq(drive, rq) ? 2 : 1; 408 return ide_queue_sense_rq(drive, rq) ? 2 : 1;
417 } else 409 } else
418 return 2; 410 return 2;
@@ -518,7 +510,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
518 error = blk_execute_rq(drive->queue, info->disk, rq, 0); 510 error = blk_execute_rq(drive->queue, info->disk, rq, 0);
519 511
520 if (buffer) 512 if (buffer)
521 *bufflen = rq->data_len; 513 *bufflen = rq->resid_len;
522 514
523 flags = rq->cmd_flags; 515 flags = rq->cmd_flags;
524 blk_put_request(rq); 516 blk_put_request(rq);
@@ -576,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
576 struct request *rq = hwif->rq; 568 struct request *rq = hwif->rq;
577 ide_expiry_t *expiry = NULL; 569 ide_expiry_t *expiry = NULL;
578 int dma_error = 0, dma, thislen, uptodate = 0; 570 int dma_error = 0, dma, thislen, uptodate = 0;
579 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors; 571 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
580 int sense = blk_sense_request(rq); 572 int sense = blk_sense_request(rq);
581 unsigned int timeout; 573 unsigned int timeout;
582 u16 len; 574 u16 len;
@@ -706,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
706 698
707out_end: 699out_end:
708 if (blk_pc_request(rq) && rc == 0) { 700 if (blk_pc_request(rq) && rc == 0) {
709 unsigned int dlen = rq->data_len; 701 rq->resid_len = 0;
710 702 blk_end_request_all(rq, 0);
711 rq->data_len = 0;
712
713 if (blk_end_request(rq, 0, dlen))
714 BUG();
715
716 hwif->rq = NULL; 703 hwif->rq = NULL;
717 } else { 704 } else {
718 if (sense && uptodate) 705 if (sense && uptodate)
@@ -730,21 +717,13 @@ out_end:
730 ide_cd_error_cmd(drive, cmd); 717 ide_cd_error_cmd(drive, cmd);
731 718
732 /* make sure it's fully ended */ 719 /* make sure it's fully ended */
733 if (blk_pc_request(rq))
734 nsectors = (rq->data_len + 511) >> 9;
735 else
736 nsectors = rq->hard_nr_sectors;
737
738 if (nsectors == 0)
739 nsectors = 1;
740
741 if (blk_fs_request(rq) == 0) { 720 if (blk_fs_request(rq) == 0) {
742 rq->data_len -= (cmd->nbytes - cmd->nleft); 721 rq->resid_len -= cmd->nbytes - cmd->nleft;
743 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 722 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
744 rq->data_len += cmd->last_xfer_len; 723 rq->resid_len += cmd->last_xfer_len;
745 } 724 }
746 725
747 ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9); 726 ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
748 727
749 if (sense && rc == 2) 728 if (sense && rc == 2)
750 ide_error(drive, "request sense failure", stat); 729 ide_error(drive, "request sense failure", stat);
@@ -758,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
758 struct request_queue *q = drive->queue; 737 struct request_queue *q = drive->queue;
759 int write = rq_data_dir(rq) == WRITE; 738 int write = rq_data_dir(rq) == WRITE;
760 unsigned short sectors_per_frame = 739 unsigned short sectors_per_frame =
761 queue_hardsect_size(q) >> SECTOR_BITS; 740 queue_logical_block_size(q) >> SECTOR_BITS;
762 741
763 ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " 742 ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
764 "secs_per_frame: %u", 743 "secs_per_frame: %u",
@@ -777,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
777 } 756 }
778 757
779 /* fs requests *must* be hardware frame aligned */ 758 /* fs requests *must* be hardware frame aligned */
780 if ((rq->nr_sectors & (sectors_per_frame - 1)) || 759 if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
781 (rq->sector & (sectors_per_frame - 1))) 760 (blk_rq_pos(rq) & (sectors_per_frame - 1)))
782 return ide_stopped; 761 return ide_stopped;
783 762
784 /* use DMA, if possible */ 763 /* use DMA, if possible */
@@ -821,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
821 */ 800 */
822 alignment = queue_dma_alignment(q) | q->dma_pad_mask; 801 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
823 if ((unsigned long)buf & alignment 802 if ((unsigned long)buf & alignment
824 || rq->data_len & q->dma_pad_mask 803 || blk_rq_bytes(rq) & q->dma_pad_mask
825 || object_is_on_stack(buf)) 804 || object_is_on_stack(buf))
826 drive->dma = 0; 805 drive->dma = 0;
827 } 806 }
@@ -869,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
869 848
870 cmd.rq = rq; 849 cmd.rq = rq;
871 850
872 if (blk_fs_request(rq) || rq->data_len) { 851 if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
873 ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9) 852 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
874 : rq->data_len);
875 ide_map_sg(drive, &cmd); 853 ide_map_sg(drive, &cmd);
876 } 854 }
877 855
878 return ide_issue_pc(drive, &cmd); 856 return ide_issue_pc(drive, &cmd);
879out_end: 857out_end:
880 nsectors = rq->hard_nr_sectors; 858 nsectors = blk_rq_sectors(rq);
881 859
882 if (nsectors == 0) 860 if (nsectors == 0)
883 nsectors = 1; 861 nsectors = 1;
@@ -1043,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1043 /* save a private copy of the TOC capacity for error handling */ 1021 /* save a private copy of the TOC capacity for error handling */
1044 drive->probed_capacity = toc->capacity * sectors_per_frame; 1022 drive->probed_capacity = toc->capacity * sectors_per_frame;
1045 1023
1046 blk_queue_hardsect_size(drive->queue, 1024 blk_queue_logical_block_size(drive->queue,
1047 sectors_per_frame << SECTOR_BITS); 1025 sectors_per_frame << SECTOR_BITS);
1048 1026
1049 /* first read just the header, so we know how long the TOC is */ 1027 /* first read just the header, so we know how long the TOC is */
1050 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, 1028 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
@@ -1360,9 +1338,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1360/* standard prep_rq_fn that builds 10 byte cmds */ 1338/* standard prep_rq_fn that builds 10 byte cmds */
1361static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) 1339static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1362{ 1340{
1363 int hard_sect = queue_hardsect_size(q); 1341 int hard_sect = queue_logical_block_size(q);
1364 long block = (long)rq->hard_sector / (hard_sect >> 9); 1342 long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
1365 unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9); 1343 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
1366 1344
1367 memset(rq->cmd, 0, BLK_MAX_CDB); 1345 memset(rq->cmd, 0, BLK_MAX_CDB);
1368 1346
@@ -1565,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
1565 1543
1566 nslots = ide_cdrom_probe_capabilities(drive); 1544 nslots = ide_cdrom_probe_capabilities(drive);
1567 1545
1568 blk_queue_hardsect_size(q, CD_FRAMESIZE); 1546 blk_queue_logical_block_size(q, CD_FRAMESIZE);
1569 1547
1570 if (ide_cdrom_register(drive, nslots)) { 1548 if (ide_cdrom_register(drive, nslots)) {
1571 printk(KERN_ERR PFX "%s: %s failed to register device with the" 1549 printk(KERN_ERR PFX "%s: %s failed to register device with the"
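
The ide-cd hunks above show the new residual-count convention for packet commands: blk_rq_bytes() always reports the full transfer size, and the untransferred remainder is returned in rq->resid_len instead of being tracked by shrinking rq->data_len. A minimal sketch of a completion path under that convention; 'transferred' is a hypothetical byte count kept by the driver:

#include <linux/blkdev.h>

static void finish_pc_request(struct request *rq, unsigned int transferred,
			      int error)
{
	rq->resid_len = blk_rq_bytes(rq) - transferred;
	blk_end_request_all(rq, error);
}
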
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index c2438804d3c4..c6f7fcfb9d67 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
82 sector_t block) 82 sector_t block)
83{ 83{
84 ide_hwif_t *hwif = drive->hwif; 84 ide_hwif_t *hwif = drive->hwif;
85 u16 nsectors = (u16)rq->nr_sectors; 85 u16 nsectors = (u16)blk_rq_sectors(rq);
86 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); 86 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
87 u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 87 u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
88 struct ide_cmd cmd; 88 struct ide_cmd cmd;
@@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
90 ide_startstop_t rc; 90 ide_startstop_t rc;
91 91
92 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) { 92 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
93 if (block + rq->nr_sectors > 1ULL << 28) 93 if (block + blk_rq_sectors(rq) > 1ULL << 28)
94 dma = 0; 94 dma = 0;
95 else 95 else
96 lba48 = 0; 96 lba48 = 0;
@@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
195 195
196 ledtrig_ide_activity(); 196 ledtrig_ide_activity();
197 197
198 pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n", 198 pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
199 drive->name, rq_data_dir(rq) == READ ? "read" : "writ", 199 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
200 (unsigned long long)block, rq->nr_sectors, 200 (unsigned long long)block, blk_rq_sectors(rq),
201 (unsigned long)rq->buffer); 201 (unsigned long)rq->buffer);
202 202
203 if (hwif->rw_disk) 203 if (hwif->rw_disk)
@@ -639,7 +639,7 @@ static void ide_disk_setup(ide_drive_t *drive)
639 } 639 }
640 640
641 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, 641 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
642 q->max_sectors / 2); 642 queue_max_sectors(q) / 2);
643 643
644 if (ata_id_is_ssd(id)) 644 if (ata_id_is_ssd(id))
645 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 645 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index d9123ecae4a9..001f68f0bb28 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
103 ide_finish_cmd(drive, cmd, stat); 103 ide_finish_cmd(drive, cmd, stat);
104 else 104 else
105 ide_complete_rq(drive, 0, 105 ide_complete_rq(drive, 0,
106 cmd->rq->nr_sectors << 9); 106 blk_rq_sectors(cmd->rq) << 9);
107 return ide_stopped; 107 return ide_stopped;
108 } 108 }
109 printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n", 109 printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 537b7c558033..650981758f15 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -194,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
194{ 194{
195 struct ide_disk_obj *floppy = drive->driver_data; 195 struct ide_disk_obj *floppy = drive->driver_data;
196 int block = sector / floppy->bs_factor; 196 int block = sector / floppy->bs_factor;
197 int blocks = rq->nr_sectors / floppy->bs_factor; 197 int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
198 int cmd = rq_data_dir(rq); 198 int cmd = rq_data_dir(rq);
199 199
200 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks); 200 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@@ -220,14 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
220 ide_init_pc(pc); 220 ide_init_pc(pc);
221 memcpy(pc->c, rq->cmd, sizeof(pc->c)); 221 memcpy(pc->c, rq->cmd, sizeof(pc->c));
222 pc->rq = rq; 222 pc->rq = rq;
223 if (rq->data_len) { 223 if (blk_rq_bytes(rq)) {
224 pc->flags |= PC_FLAG_DMA_OK; 224 pc->flags |= PC_FLAG_DMA_OK;
225 if (rq_data_dir(rq) == WRITE) 225 if (rq_data_dir(rq) == WRITE)
226 pc->flags |= PC_FLAG_WRITING; 226 pc->flags |= PC_FLAG_WRITING;
227 } 227 }
228 /* pio will be performed by ide_pio_bytes() which handles sg fine */ 228 /* pio will be performed by ide_pio_bytes() which handles sg fine */
229 pc->buf = NULL; 229 pc->buf = NULL;
230 pc->req_xfer = pc->buf_size = rq->data_len; 230 pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
231} 231}
232 232
233static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, 233static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
@@ -259,8 +259,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
259 goto out_end; 259 goto out_end;
260 } 260 }
261 if (blk_fs_request(rq)) { 261 if (blk_fs_request(rq)) {
262 if (((long)rq->sector % floppy->bs_factor) || 262 if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
263 (rq->nr_sectors % floppy->bs_factor)) { 263 (blk_rq_sectors(rq) % floppy->bs_factor)) {
264 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", 264 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
265 drive->name); 265 drive->name);
266 goto out_end; 266 goto out_end;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 41d804065d38..bba4297f2f03 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
116unsigned int ide_rq_bytes(struct request *rq) 116unsigned int ide_rq_bytes(struct request *rq)
117{ 117{
118 if (blk_pc_request(rq)) 118 if (blk_pc_request(rq))
119 return rq->data_len; 119 return blk_rq_bytes(rq);
120 else 120 else
121 return rq->hard_cur_sectors << 9; 121 return blk_rq_cur_sectors(rq) << 9;
122} 122}
123EXPORT_SYMBOL_GPL(ide_rq_bytes); 123EXPORT_SYMBOL_GPL(ide_rq_bytes);
124 124
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
133 * and complete the whole request right now 133 * and complete the whole request right now
134 */ 134 */
135 if (blk_noretry_request(rq) && error <= 0) 135 if (blk_noretry_request(rq) && error <= 0)
136 nr_bytes = rq->hard_nr_sectors << 9; 136 nr_bytes = blk_rq_sectors(rq) << 9;
137 137
138 rc = ide_end_rq(drive, rq, error, nr_bytes); 138 rc = ide_end_rq(drive, rq, error, nr_bytes);
139 if (rc == 0) 139 if (rc == 0)
@@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
279 279
280 if (cmd) { 280 if (cmd) {
281 if (cmd->protocol == ATA_PROT_PIO) { 281 if (cmd->protocol == ATA_PROT_PIO) {
282 ide_init_sg_cmd(cmd, rq->nr_sectors << 9); 282 ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
283 ide_map_sg(drive, cmd); 283 ide_map_sg(drive, cmd);
284 } 284 }
285 285
@@ -387,7 +387,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
387 387
388 drv = *(struct ide_driver **)rq->rq_disk->private_data; 388 drv = *(struct ide_driver **)rq->rq_disk->private_data;
389 389
390 return drv->do_request(drive, rq, rq->sector); 390 return drv->do_request(drive, rq, blk_rq_pos(rq));
391 } 391 }
392 return do_special(drive); 392 return do_special(drive);
393kill_rq: 393kill_rq:
@@ -487,10 +487,10 @@ void do_ide_request(struct request_queue *q)
487 487
488 if (!ide_lock_port(hwif)) { 488 if (!ide_lock_port(hwif)) {
489 ide_hwif_t *prev_port; 489 ide_hwif_t *prev_port;
490
491 WARN_ON_ONCE(hwif->rq);
490repeat: 492repeat:
491 prev_port = hwif->host->cur_port; 493 prev_port = hwif->host->cur_port;
492 hwif->rq = NULL;
493
494 if (drive->dev_flags & IDE_DFLAG_SLEEPING && 494 if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
495 time_after(drive->sleep, jiffies)) { 495 time_after(drive->sleep, jiffies)) {
496 ide_unlock_port(hwif); 496 ide_unlock_port(hwif);
@@ -519,7 +519,9 @@ repeat:
519 * we know that the queue isn't empty, but this can happen 519 * we know that the queue isn't empty, but this can happen
520 * if the q->prep_rq_fn() decides to kill a request 520 * if the q->prep_rq_fn() decides to kill a request
521 */ 521 */
522 rq = elv_next_request(drive->queue); 522 if (!rq)
523 rq = blk_fetch_request(drive->queue);
524
523 spin_unlock_irq(q->queue_lock); 525 spin_unlock_irq(q->queue_lock);
524 spin_lock_irq(&hwif->lock); 526 spin_lock_irq(&hwif->lock);
525 527
@@ -531,7 +533,7 @@ repeat:
531 /* 533 /*
532 * Sanity: don't accept a request that isn't a PM request 534 * Sanity: don't accept a request that isn't a PM request
533 * if we are currently power managed. This is very important as 535 * if we are currently power managed. This is very important as
534 * blk_stop_queue() doesn't prevent the elv_next_request() 536 * blk_stop_queue() doesn't prevent the blk_fetch_request()
535 * above to return us whatever is in the queue. Since we call 537 * above to return us whatever is in the queue. Since we call
536 * ide_do_request() ourselves, we end up taking requests while 538 * ide_do_request() ourselves, we end up taking requests while
537 * the queue is blocked... 539 * the queue is blocked...
@@ -555,8 +557,11 @@ repeat:
555 startstop = start_request(drive, rq); 557 startstop = start_request(drive, rq);
556 spin_lock_irq(&hwif->lock); 558 spin_lock_irq(&hwif->lock);
557 559
558 if (startstop == ide_stopped) 560 if (startstop == ide_stopped) {
561 rq = hwif->rq;
562 hwif->rq = NULL;
559 goto repeat; 563 goto repeat;
564 }
560 } else 565 } else
561 goto plug_device; 566 goto plug_device;
562out: 567out:
@@ -572,18 +577,24 @@ plug_device:
572plug_device_2: 577plug_device_2:
573 spin_lock_irq(q->queue_lock); 578 spin_lock_irq(q->queue_lock);
574 579
580 if (rq)
581 blk_requeue_request(q, rq);
575 if (!elv_queue_empty(q)) 582 if (!elv_queue_empty(q))
576 blk_plug_device(q); 583 blk_plug_device(q);
577} 584}
578 585
579static void ide_plug_device(ide_drive_t *drive) 586static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
580{ 587{
581 struct request_queue *q = drive->queue; 588 struct request_queue *q = drive->queue;
582 unsigned long flags; 589 unsigned long flags;
583 590
584 spin_lock_irqsave(q->queue_lock, flags); 591 spin_lock_irqsave(q->queue_lock, flags);
592
593 if (rq)
594 blk_requeue_request(q, rq);
585 if (!elv_queue_empty(q)) 595 if (!elv_queue_empty(q))
586 blk_plug_device(q); 596 blk_plug_device(q);
597
587 spin_unlock_irqrestore(q->queue_lock, flags); 598 spin_unlock_irqrestore(q->queue_lock, flags);
588} 599}
589 600
@@ -632,6 +643,7 @@ void ide_timer_expiry (unsigned long data)
632 unsigned long flags; 643 unsigned long flags;
633 int wait = -1; 644 int wait = -1;
634 int plug_device = 0; 645 int plug_device = 0;
646 struct request *uninitialized_var(rq_in_flight);
635 647
636 spin_lock_irqsave(&hwif->lock, flags); 648 spin_lock_irqsave(&hwif->lock, flags);
637 649
@@ -693,6 +705,8 @@ void ide_timer_expiry (unsigned long data)
693 spin_lock_irq(&hwif->lock); 705 spin_lock_irq(&hwif->lock);
694 enable_irq(hwif->irq); 706 enable_irq(hwif->irq);
695 if (startstop == ide_stopped && hwif->polling == 0) { 707 if (startstop == ide_stopped && hwif->polling == 0) {
708 rq_in_flight = hwif->rq;
709 hwif->rq = NULL;
696 ide_unlock_port(hwif); 710 ide_unlock_port(hwif);
697 plug_device = 1; 711 plug_device = 1;
698 } 712 }
@@ -701,7 +715,7 @@ void ide_timer_expiry (unsigned long data)
701 715
702 if (plug_device) { 716 if (plug_device) {
703 ide_unlock_host(hwif->host); 717 ide_unlock_host(hwif->host);
704 ide_plug_device(drive); 718 ide_requeue_and_plug(drive, rq_in_flight);
705 } 719 }
706} 720}
707 721
@@ -787,6 +801,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
787 ide_startstop_t startstop; 801 ide_startstop_t startstop;
788 irqreturn_t irq_ret = IRQ_NONE; 802 irqreturn_t irq_ret = IRQ_NONE;
789 int plug_device = 0; 803 int plug_device = 0;
804 struct request *uninitialized_var(rq_in_flight);
790 805
791 if (host->host_flags & IDE_HFLAG_SERIALIZE) { 806 if (host->host_flags & IDE_HFLAG_SERIALIZE) {
792 if (hwif != host->cur_port) 807 if (hwif != host->cur_port)
@@ -866,6 +881,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
866 */ 881 */
867 if (startstop == ide_stopped && hwif->polling == 0) { 882 if (startstop == ide_stopped && hwif->polling == 0) {
868 BUG_ON(hwif->handler); 883 BUG_ON(hwif->handler);
884 rq_in_flight = hwif->rq;
885 hwif->rq = NULL;
869 ide_unlock_port(hwif); 886 ide_unlock_port(hwif);
870 plug_device = 1; 887 plug_device = 1;
871 } 888 }
@@ -875,7 +892,7 @@ out:
875out_early: 892out_early:
876 if (plug_device) { 893 if (plug_device) {
877 ide_unlock_host(hwif->host); 894 ide_unlock_host(hwif->host);
878 ide_plug_device(drive); 895 ide_requeue_and_plug(drive, rq_in_flight);
879 } 896 }
880 897
881 return irq_ret; 898 return irq_ret;
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 2148df836ce7..e386a32dc9ba 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -96,7 +96,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
96 96
97 if (rq) 97 if (rq)
98 printk(KERN_CONT ", sector=%llu", 98 printk(KERN_CONT ", sector=%llu",
99 (unsigned long long)rq->sector); 99 (unsigned long long)blk_rq_pos(rq));
100 } 100 }
101 printk(KERN_CONT "\n"); 101 printk(KERN_CONT "\n");
102} 102}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 203bbeac182f..d9764f0bc82f 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -380,7 +380,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
380 } 380 }
381 381
382 tape->first_frame += blocks; 382 tape->first_frame += blocks;
383 rq->data_len -= blocks * tape->blk_size; 383 rq->resid_len -= blocks * tape->blk_size;
384 384
385 if (pc->error) { 385 if (pc->error) {
386 uptodate = 0; 386 uptodate = 0;
@@ -586,7 +586,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
586 struct ide_atapi_pc *pc, struct request *rq, 586 struct ide_atapi_pc *pc, struct request *rq,
587 u8 opcode) 587 u8 opcode)
588{ 588{
589 unsigned int length = rq->nr_sectors; 589 unsigned int length = blk_rq_sectors(rq);
590 590
591 ide_init_pc(pc); 591 ide_init_pc(pc);
592 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); 592 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
@@ -617,8 +617,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
617 struct ide_cmd cmd; 617 struct ide_cmd cmd;
618 u8 stat; 618 u8 stat;
619 619
620 debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu\n", 620 debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n",
621 (unsigned long long)rq->sector, rq->nr_sectors); 621 (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
622 622
623 if (!(blk_special_request(rq) || blk_sense_request(rq))) { 623 if (!(blk_special_request(rq) || blk_sense_request(rq))) {
624 /* We do not support buffer cache originated requests. */ 624 /* We do not support buffer cache originated requests. */
@@ -892,7 +892,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
892 rq->cmd_type = REQ_TYPE_SPECIAL; 892 rq->cmd_type = REQ_TYPE_SPECIAL;
893 rq->cmd[13] = cmd; 893 rq->cmd[13] = cmd;
894 rq->rq_disk = tape->disk; 894 rq->rq_disk = tape->disk;
895 rq->sector = tape->first_frame; 895 rq->__sector = tape->first_frame;
896 896
897 if (size) { 897 if (size) {
898 ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size, 898 ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
@@ -904,7 +904,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
904 blk_execute_rq(drive->queue, tape->disk, rq, 0); 904 blk_execute_rq(drive->queue, tape->disk, rq, 0);
905 905
906 /* calculate the number of transferred bytes and update buffer state */ 906 /* calculate the number of transferred bytes and update buffer state */
907 size -= rq->data_len; 907 size -= rq->resid_len;
908 tape->cur = tape->buf; 908 tape->cur = tape->buf;
909 if (cmd == REQ_IDETAPE_READ) 909 if (cmd == REQ_IDETAPE_READ)
910 tape->valid = size; 910 tape->valid = size;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index f400eb4d4aff..a0c3e1b2f73c 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -385,7 +385,7 @@ out_end:
385 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) 385 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
386 ide_finish_cmd(drive, cmd, stat); 386 ide_finish_cmd(drive, cmd, stat);
387 else 387 else
388 ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9); 388 ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
389 return ide_stopped; 389 return ide_stopped;
390out_err: 390out_err:
391 ide_error_cmd(drive, cmd); 391 ide_error_cmd(drive, cmd);
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index b3bc96f930a6..e24ecc87a9b1 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -177,7 +177,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
177 u8 clock = inb(high_16 + 0x11); 177 u8 clock = inb(high_16 + 0x11);
178 178
179 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11); 179 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
180 word_count = (rq->nr_sectors << 8); 180 word_count = (blk_rq_sectors(rq) << 8);
181 word_count = (rq_data_dir(rq) == READ) ? 181 word_count = (rq_data_dir(rq) == READ) ?
182 word_count | 0x05000000 : 182 word_count | 0x05000000 :
183 word_count | 0x06000000; 183 word_count | 0x06000000;
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index b4cf42dc8a6f..05a93d6baecc 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
112 ide_hwif_t *hwif = drive->hwif; 112 ide_hwif_t *hwif = drive->hwif;
113 unsigned long sc_base = hwif->config_data; 113 unsigned long sc_base = hwif->config_data;
114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); 114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
115 unsigned long nsectors = hwif->rq->nr_sectors; 115 unsigned long nsectors = blk_rq_sectors(hwif->rq);
116 116
117 /* 117 /*
118 * We have to manually load the sector count and size into 118 * We have to manually load the sector count and size into
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 564422d23976..5ca76224f6d1 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ? 307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1); 308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
309 309
310 tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt); 310 tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
311 311
312 return 0; 312 return 0;
313} 313}
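
The IDE hunks above all apply the same substitution: open-coded request fields are replaced by the request accessors this series introduces. A minimal sketch of the mapping, assuming the <linux/blkdev.h> helpers used in these hunks; the wrapper function itself is illustrative, not part of the patch:

#include <linux/blkdev.h>

/* Illustrative only: how request geometry is read after this series. */
static void show_rq_geometry(struct request *rq)
{
        sector_t pos = blk_rq_pos(rq);                  /* was rq->sector */
        unsigned int sectors = blk_rq_sectors(rq);      /* was rq->nr_sectors */
        unsigned int cur = blk_rq_cur_sectors(rq);      /* was rq->current_nr_sectors */
        unsigned int bytes = blk_rq_bytes(rq);          /* was rq->nr_sectors << 9 for fs requests */

        pr_debug("req %p: pos %llu, %u sectors (%u current), %u bytes\n",
                 rq, (unsigned long long)pos, sectors, cur, bytes);
}

Keeping drivers on accessors rather than struct fields is what lets the core rename the fields (rq->__sector, rq->__data_len) and maintain them centrally.
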
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 56df1cee8fb3..3319c2fec28e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
232 target = rdev->sb_start + offset + index * (PAGE_SIZE/512); 232 target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
233 233
234 if (sync_page_io(rdev->bdev, target, 234 if (sync_page_io(rdev->bdev, target,
235 roundup(size, bdev_hardsect_size(rdev->bdev)), 235 roundup(size, bdev_logical_block_size(rdev->bdev)),
236 page, READ)) { 236 page, READ)) {
237 page->index = index; 237 page->index = index;
238 attach_page_buffers(page, NULL); /* so that free_buffer will 238 attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
287 int size = PAGE_SIZE; 287 int size = PAGE_SIZE;
288 if (page->index == bitmap->file_pages-1) 288 if (page->index == bitmap->file_pages-1)
289 size = roundup(bitmap->last_page_size, 289 size = roundup(bitmap->last_page_size,
290 bdev_hardsect_size(rdev->bdev)); 290 bdev_logical_block_size(rdev->bdev));
291 /* Just make sure we aren't corrupting data or 291 /* Just make sure we aren't corrupting data or
292 * metadata 292 * metadata
293 */ 293 */
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index a2e26c242141..75d8081a9041 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
178 } 178 }
179 179
180 /* Validate the chunk size against the device block size */ 180 /* Validate the chunk size against the device block size */
181 if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) { 181 if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
182 *error = "Chunk size is not a multiple of device blocksize"; 182 *error = "Chunk size is not a multiple of device blocksize";
183 return -EINVAL; 183 return -EINVAL;
184 } 184 }
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index be233bc4d917..6fa8ccf91c70 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
413 * Buffer holds both header and bitset. 413 * Buffer holds both header and bitset.
414 */ 414 */
415 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + 415 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
416 bitset_size, ti->limits.hardsect_size); 416 bitset_size,
417 ti->limits.logical_block_size);
417 418
418 if (buf_size > dev->bdev->bd_inode->i_size) { 419 if (buf_size > dev->bdev->bd_inode->i_size) {
419 DMWARN("log device %s too small: need %llu bytes", 420 DMWARN("log device %s too small: need %llu bytes",
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e75c6dd76a9a..2662a41337e7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
282 */ 282 */
283 if (!ps->store->chunk_size) { 283 if (!ps->store->chunk_size) {
284 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, 284 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
285 bdev_hardsect_size(ps->store->cow->bdev) >> 9); 285 bdev_logical_block_size(ps->store->cow->bdev) >> 9);
286 ps->store->chunk_mask = ps->store->chunk_size - 1; 286 ps->store->chunk_mask = ps->store->chunk_size - 1;
287 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; 287 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
288 chunk_size_supplied = 0; 288 chunk_size_supplied = 0;
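
The md/dm hunks above are part of the hardsect_size to logical_block_size rename: bdev_hardsect_size(), queue_hardsect_size() and blk_queue_hardsect_size() become bdev_logical_block_size(), queue_logical_block_size() and blk_queue_logical_block_size(), with unchanged semantics (bytes per logical block). A hedged sketch of typical use, mirroring the roundup() calls in bitmap.c above; the helper name is illustrative:

#include <linux/blkdev.h>

/* Sketch: round a metadata I/O size up to the device's logical block size,
 * as read_sb_page()/write_sb_page() do above. */
static unsigned int aligned_io_size(struct block_device *bdev, unsigned int bytes)
{
        unsigned int lbs = bdev_logical_block_size(bdev); /* was bdev_hardsect_size() */

        return roundup(bytes, lbs);
}
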
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d5..e9a73bb242b0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
108 lhs->max_hw_segments = 108 lhs->max_hw_segments =
109 min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); 109 min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
110 110
111 lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size); 111 lhs->logical_block_size = max(lhs->logical_block_size,
112 rhs->logical_block_size);
112 113
113 lhs->max_segment_size = 114 lhs->max_segment_size =
114 min_not_zero(lhs->max_segment_size, rhs->max_segment_size); 115 min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -509,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
509 * combine_restrictions_low() 510 * combine_restrictions_low()
510 */ 511 */
511 rs->max_sectors = 512 rs->max_sectors =
512 min_not_zero(rs->max_sectors, q->max_sectors); 513 min_not_zero(rs->max_sectors, queue_max_sectors(q));
513 514
514 /* 515 /*
515 * Check if merge fn is supported. 516 * Check if merge fn is supported.
@@ -524,24 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
524 525
525 rs->max_phys_segments = 526 rs->max_phys_segments =
526 min_not_zero(rs->max_phys_segments, 527 min_not_zero(rs->max_phys_segments,
527 q->max_phys_segments); 528 queue_max_phys_segments(q));
528 529
529 rs->max_hw_segments = 530 rs->max_hw_segments =
530 min_not_zero(rs->max_hw_segments, q->max_hw_segments); 531 min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
531 532
532 rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size); 533 rs->logical_block_size = max(rs->logical_block_size,
534 queue_logical_block_size(q));
533 535
534 rs->max_segment_size = 536 rs->max_segment_size =
535 min_not_zero(rs->max_segment_size, q->max_segment_size); 537 min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
536 538
537 rs->max_hw_sectors = 539 rs->max_hw_sectors =
538 min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); 540 min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
539 541
540 rs->seg_boundary_mask = 542 rs->seg_boundary_mask =
541 min_not_zero(rs->seg_boundary_mask, 543 min_not_zero(rs->seg_boundary_mask,
542 q->seg_boundary_mask); 544 queue_segment_boundary(q));
543 545
544 rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn); 546 rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
545 547
546 rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 548 rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
547} 549}
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
683 rs->max_phys_segments = MAX_PHYS_SEGMENTS; 685 rs->max_phys_segments = MAX_PHYS_SEGMENTS;
684 if (!rs->max_hw_segments) 686 if (!rs->max_hw_segments)
685 rs->max_hw_segments = MAX_HW_SEGMENTS; 687 rs->max_hw_segments = MAX_HW_SEGMENTS;
686 if (!rs->hardsect_size) 688 if (!rs->logical_block_size)
687 rs->hardsect_size = 1 << SECTOR_SHIFT; 689 rs->logical_block_size = 1 << SECTOR_SHIFT;
688 if (!rs->max_segment_size) 690 if (!rs->max_segment_size)
689 rs->max_segment_size = MAX_SEGMENT_SIZE; 691 rs->max_segment_size = MAX_SEGMENT_SIZE;
690 if (!rs->seg_boundary_mask) 692 if (!rs->seg_boundary_mask)
@@ -912,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
912 * restrictions. 914 * restrictions.
913 */ 915 */
914 blk_queue_max_sectors(q, t->limits.max_sectors); 916 blk_queue_max_sectors(q, t->limits.max_sectors);
915 q->max_phys_segments = t->limits.max_phys_segments; 917 blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
916 q->max_hw_segments = t->limits.max_hw_segments; 918 blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
917 q->hardsect_size = t->limits.hardsect_size; 919 blk_queue_logical_block_size(q, t->limits.logical_block_size);
918 q->max_segment_size = t->limits.max_segment_size; 920 blk_queue_max_segment_size(q, t->limits.max_segment_size);
919 q->max_hw_sectors = t->limits.max_hw_sectors; 921 blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
920 q->seg_boundary_mask = t->limits.seg_boundary_mask; 922 blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
921 q->bounce_pfn = t->limits.bounce_pfn; 923 blk_queue_bounce_limit(q, t->limits.bounce_pfn);
922 924
923 if (t->limits.no_cluster) 925 if (t->limits.no_cluster)
924 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); 926 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
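
dm-table.c switches from reading and writing request_queue fields directly to the queue limit accessors (queue_max_sectors(), queue_max_phys_segments(), queue_logical_block_size(), queue_segment_boundary(), and so on) and the blk_queue_*() setters. A rough sketch of the stacking idiom used above, assuming the same min_not_zero()/max() helpers dm-table.c relies on; the function itself is illustrative:

#include <linux/blkdev.h>

/* Sketch: fold one underlying queue's limits into a stacked queue's limits,
 * reading through accessors rather than request_queue fields. */
static void stack_limits_example(struct request_queue *top,
                                 struct request_queue *bottom)
{
        unsigned int max_sectors = min_not_zero(queue_max_sectors(top),
                                                queue_max_sectors(bottom));
        unsigned short lbs = max(queue_logical_block_size(top),
                                 queue_logical_block_size(bottom));

        blk_queue_max_sectors(top, max_sectors);
        blk_queue_logical_block_size(top, lbs);
}

Going through the setters instead of assigning q->* keeps any side effects of a limit change (recalculated derived limits, sanity clamping) in one place.
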
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a1..64f1f3e046e0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
146 * a one page request is never in violation. 146 * a one page request is never in violation.
147 */ 147 */
148 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 148 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
149 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 149 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
150 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 150 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
151 151
152 disk->num_sectors = rdev->sectors; 152 disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 641b211fe3fe..20f6ac338349 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1202 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1202 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1203 1203
1204 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1204 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1205 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1205 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1206 if (rdev->sb_size & bmask) 1206 if (rdev->sb_size & bmask)
1207 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1207 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1208 1208
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe823..4ee31aa13c40 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
303 * merge_bvec_fn will be involved in multipath.) 303 * merge_bvec_fn will be involved in multipath.)
304 */ 304 */
305 if (q->merge_bvec_fn && 305 if (q->merge_bvec_fn &&
306 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 306 queue_max_sectors(q) > (PAGE_SIZE>>9))
307 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 307 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
308 308
309 conf->working_disks++; 309 conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
467 * violating it, not that we ever expect a device with 467 * violating it, not that we ever expect a device with
468 * a merge_bvec_fn to be involved in multipath */ 468 * a merge_bvec_fn to be involved in multipath */
469 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 469 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
470 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 470 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
471 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 471 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
472 472
473 if (!test_bit(Faulty, &rdev->flags)) 473 if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be55..925507e7d673 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
144 */ 144 */
145 145
146 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && 146 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
147 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 147 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
148 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 148 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
149 149
150 if (!smallest || (rdev1->sectors < smallest->sectors)) 150 if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde1..e23758b4a34e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1130 * a one page request is never in violation. 1130 * a one page request is never in violation.
1131 */ 1131 */
1132 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1132 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1133 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1133 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
1134 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 1134 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
1135 1135
1136 p->head_position = 0; 1136 p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
1996 * a one page request is never in violation. 1996 * a one page request is never in violation.
1997 */ 1997 */
1998 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1998 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1999 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1999 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2000 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 2000 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2001 2001
2002 disk->head_position = 0; 2002 disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44b..750550c1166f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1158 * a one page request is never in violation. 1158 * a one page request is never in violation.
1159 */ 1159 */
1160 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1160 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1161 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1161 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
1162 mddev->queue->max_sectors = (PAGE_SIZE>>9); 1162 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
1163 1163
1164 p->head_position = 0; 1164 p->head_position = 0;
1165 rdev->raid_disk = mirror; 1165 rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
2145 * a one page request is never in violation. 2145 * a one page request is never in violation.
2146 */ 2146 */
2147 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 2147 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2148 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 2148 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2149 mddev->queue->max_sectors = (PAGE_SIZE>>9); 2149 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2150 2150
2151 disk->head_position = 0; 2151 disk->head_position = 0;
2152 } 2152 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index bb37fb1b2d82..bef876698232 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
3463{ 3463{
3464 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3464 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3465 3465
3466 if ((bi->bi_size>>9) > q->max_sectors) 3466 if ((bi->bi_size>>9) > queue_max_sectors(q))
3467 return 0; 3467 return 0;
3468 blk_recount_segments(q, bi); 3468 blk_recount_segments(q, bi);
3469 if (bi->bi_phys_segments > q->max_phys_segments) 3469 if (bi->bi_phys_segments > queue_max_phys_segments(q))
3470 return 0; 3470 return 0;
3471 3471
3472 if (q->merge_bvec_fn) 3472 if (q->merge_bvec_fn)
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index de143deb06f0..7847bbc1440d 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -672,15 +672,14 @@ try_again:
672 msb->req_sg); 672 msb->req_sg);
673 673
674 if (!msb->seg_count) { 674 if (!msb->seg_count) {
675 chunk = __blk_end_request(msb->block_req, -ENOMEM, 675 chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
676 blk_rq_cur_bytes(msb->block_req));
677 continue; 676 continue;
678 } 677 }
679 678
680 t_sec = msb->block_req->sector << 9; 679 t_sec = blk_rq_pos(msb->block_req) << 9;
681 sector_div(t_sec, msb->page_size); 680 sector_div(t_sec, msb->page_size);
682 681
683 count = msb->block_req->nr_sectors << 9; 682 count = blk_rq_bytes(msb->block_req);
684 count /= msb->page_size; 683 count /= msb->page_size;
685 684
686 param.system = msb->system; 685 param.system = msb->system;
@@ -705,8 +704,8 @@ try_again:
705 return 0; 704 return 0;
706 } 705 }
707 706
708 dev_dbg(&card->dev, "elv_next\n"); 707 dev_dbg(&card->dev, "blk_fetch\n");
709 msb->block_req = elv_next_request(msb->queue); 708 msb->block_req = blk_fetch_request(msb->queue);
710 if (!msb->block_req) { 709 if (!msb->block_req) {
711 dev_dbg(&card->dev, "issue end\n"); 710 dev_dbg(&card->dev, "issue end\n");
712 return -EAGAIN; 711 return -EAGAIN;
@@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
745 t_len *= msb->page_size; 744 t_len *= msb->page_size;
746 } 745 }
747 } else 746 } else
748 t_len = msb->block_req->nr_sectors << 9; 747 t_len = blk_rq_bytes(msb->block_req);
749 748
750 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); 749 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
751 750
@@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
825 return; 824 return;
826 825
827 if (msb->eject) { 826 if (msb->eject) {
828 while ((req = elv_next_request(q)) != NULL) 827 while ((req = blk_fetch_request(q)) != NULL)
829 __blk_end_request(req, -ENODEV, blk_rq_bytes(req)); 828 __blk_end_request_all(req, -ENODEV);
830 829
831 return; 830 return;
832 } 831 }
@@ -1243,7 +1242,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
1243 1242
1244 sprintf(msb->disk->disk_name, "mspblk%d", disk_id); 1243 sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
1245 1244
1246 blk_queue_hardsect_size(msb->queue, msb->page_size); 1245 blk_queue_logical_block_size(msb->queue, msb->page_size);
1247 1246
1248 capacity = be16_to_cpu(sys_info->user_block_count); 1247 capacity = be16_to_cpu(sys_info->user_block_count);
1249 capacity *= be16_to_cpu(sys_info->block_size); 1248 capacity *= be16_to_cpu(sys_info->block_size);
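
mspro_block.c shows the new fetch-and-complete pairing: blk_fetch_request() peeks and dequeues in one call (replacing elv_next_request() plus blkdev_dequeue_request()), and a dequeued request is finished with __blk_end_request_all() or __blk_end_request_cur() rather than a hand-computed byte count. A minimal, hypothetical drain loop in the same style:

#include <linux/blkdev.h>

/* Sketch: fail every queued request, e.g. when the device is going away.
 * Caller is assumed to hold q->queue_lock, hence the __ variants. */
static void drain_queue_example(struct request_queue *q)
{
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL)    /* peek + dequeue */
                __blk_end_request_all(req, -ENODEV);    /* complete all bytes */
}
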
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b97..79f5433359f9 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1277,8 +1277,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1277 /* do we need to support multiple segments? */ 1277 /* do we need to support multiple segments? */
1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
1280 ioc->name, __func__, req->bio->bi_vcnt, req->data_len, 1280 ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1281 rsp->bio->bi_vcnt, rsp->data_len); 1281 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1282 return -EINVAL; 1282 return -EINVAL;
1283 } 1283 }
1284 1284
@@ -1295,7 +1295,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1295 smpreq = (SmpPassthroughRequest_t *)mf; 1295 smpreq = (SmpPassthroughRequest_t *)mf;
1296 memset(smpreq, 0, sizeof(*smpreq)); 1296 memset(smpreq, 0, sizeof(*smpreq));
1297 1297
1298 smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4); 1298 smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; 1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
1300 1300
1301 if (rphy) 1301 if (rphy)
@@ -1321,10 +1321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1321 MPI_SGE_FLAGS_END_OF_BUFFER | 1321 MPI_SGE_FLAGS_END_OF_BUFFER |
1322 MPI_SGE_FLAGS_DIRECTION | 1322 MPI_SGE_FLAGS_DIRECTION |
1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
1324 flagsLength |= (req->data_len - 4); 1324 flagsLength |= (blk_rq_bytes(req) - 4);
1325 1325
1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
1327 req->data_len, PCI_DMA_BIDIRECTIONAL); 1327 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1328 if (!dma_addr_out) 1328 if (!dma_addr_out)
1329 goto put_mf; 1329 goto put_mf;
1330 mpt_add_sge(psge, flagsLength, dma_addr_out); 1330 mpt_add_sge(psge, flagsLength, dma_addr_out);
@@ -1332,9 +1332,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1332 1332
1333 /* response */ 1333 /* response */
1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
1335 flagsLength |= rsp->data_len + 4; 1335 flagsLength |= blk_rq_bytes(rsp) + 4;
1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
1337 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1337 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1338 if (!dma_addr_in) 1338 if (!dma_addr_in)
1339 goto unmap; 1339 goto unmap;
1340 mpt_add_sge(psge, flagsLength, dma_addr_in); 1340 mpt_add_sge(psge, flagsLength, dma_addr_in);
@@ -1357,8 +1357,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
1358 memcpy(req->sense, smprep, sizeof(*smprep)); 1358 memcpy(req->sense, smprep, sizeof(*smprep));
1359 req->sense_len = sizeof(*smprep); 1359 req->sense_len = sizeof(*smprep);
1360 req->data_len = 0; 1360 req->resid_len = 0;
1361 rsp->data_len -= smprep->ResponseDataLength; 1361 rsp->resid_len -= smprep->ResponseDataLength;
1362 } else { 1362 } else {
1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
1364 ioc->name, __func__); 1364 ioc->name, __func__);
@@ -1366,10 +1366,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1366 } 1366 }
1367unmap: 1367unmap:
1368 if (dma_addr_out) 1368 if (dma_addr_out)
1369 pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len, 1369 pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
1370 PCI_DMA_BIDIRECTIONAL); 1370 PCI_DMA_BIDIRECTIONAL);
1371 if (dma_addr_in) 1371 if (dma_addr_in)
1372 pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len, 1372 pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
1373 PCI_DMA_BIDIRECTIONAL); 1373 PCI_DMA_BIDIRECTIONAL);
1374put_mf: 1374put_mf:
1375 if (mf) 1375 if (mf)
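
In the mptsas SMP handler, rq->data_len had been doing double duty: payload size on submission and residual count on completion. After this series the payload size is always read with blk_rq_bytes() and the leftover byte count lives in rq->resid_len, which the driver writes back. A hedged sketch of that completion-side convention; the function and parameter names are illustrative:

#include <linux/blkdev.h>

/* Sketch: a passthrough request completes having transferred 'done' bytes. */
static void complete_pc_request_example(struct request *rq, unsigned int done)
{
        /* the total size is read through the accessor ... */
        unsigned int total = blk_rq_bytes(rq);

        /* ... and only the residual is reported back through the request */
        rq->resid_len = total - done;
}
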
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc41..335d4c78a775 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
426 struct request_queue *q = req->q; 426 struct request_queue *q = req->q;
427 unsigned long flags; 427 unsigned long flags;
428 428
429 if (blk_end_request(req, error, nr_bytes)) { 429 if (blk_end_request(req, error, nr_bytes))
430 int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
431
432 if (blk_pc_request(req))
433 leftover = req->data_len;
434
435 if (error) 430 if (error)
436 blk_end_request(req, -EIO, leftover); 431 blk_end_request_all(req, -EIO);
437 }
438 432
439 spin_lock_irqsave(q->queue_lock, flags); 433 spin_lock_irqsave(q->queue_lock, flags);
440 434
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
761 break; 755 break;
762 756
763 case CACHE_SMARTFETCH: 757 case CACHE_SMARTFETCH:
764 if (req->nr_sectors > 16) 758 if (blk_rq_sectors(req) > 16)
765 ctl_flags = 0x201F0008; 759 ctl_flags = 0x201F0008;
766 else 760 else
767 ctl_flags = 0x001F0000; 761 ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
781 ctl_flags = 0x001F0010; 775 ctl_flags = 0x001F0010;
782 break; 776 break;
783 case CACHE_SMARTBACK: 777 case CACHE_SMARTBACK:
784 if (req->nr_sectors > 16) 778 if (blk_rq_sectors(req) > 16)
785 ctl_flags = 0x001F0004; 779 ctl_flags = 0x001F0004;
786 else 780 else
787 ctl_flags = 0x001F0010; 781 ctl_flags = 0x001F0010;
788 break; 782 break;
789 case CACHE_SMARTTHROUGH: 783 case CACHE_SMARTTHROUGH:
790 if (req->nr_sectors > 16) 784 if (blk_rq_sectors(req) > 16)
791 ctl_flags = 0x001F0004; 785 ctl_flags = 0x001F0004;
792 else 786 else
793 ctl_flags = 0x001F0010; 787 ctl_flags = 0x001F0010;
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
800 if (c->adaptec) { 794 if (c->adaptec) {
801 u8 cmd[10]; 795 u8 cmd[10];
802 u32 scsi_flags; 796 u32 scsi_flags;
803 u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; 797 u16 hwsec;
804 798
799 hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
805 memset(cmd, 0, 10); 800 memset(cmd, 0, 10);
806 801
807 sgl_offset = SGL_OFFSET_12; 802 sgl_offset = SGL_OFFSET_12;
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
827 822
828 *mptr++ = cpu_to_le32(scsi_flags); 823 *mptr++ = cpu_to_le32(scsi_flags);
829 824
830 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 825 *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
831 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 826 *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
832 827
833 memcpy(mptr, cmd, 10); 828 memcpy(mptr, cmd, 10);
834 mptr += 4; 829 mptr += 4;
835 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 830 *mptr++ = cpu_to_le32(blk_rq_bytes(req));
836 } else 831 } else
837#endif 832#endif
838 { 833 {
839 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); 834 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
840 *mptr++ = cpu_to_le32(ctl_flags); 835 *mptr++ = cpu_to_le32(ctl_flags);
841 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 836 *mptr++ = cpu_to_le32(blk_rq_bytes(req));
842 *mptr++ = 837 *mptr++ =
843 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); 838 cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
844 *mptr++ = 839 *mptr++ =
845 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); 840 cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
846 } 841 }
847 842
848 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 843 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
883 struct request *req; 878 struct request *req;
884 879
885 while (!blk_queue_plugged(q)) { 880 while (!blk_queue_plugged(q)) {
886 req = elv_next_request(q); 881 req = blk_peek_request(q);
887 if (!req) 882 if (!req)
888 break; 883 break;
889 884
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
896 891
897 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { 892 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
898 if (!i2o_block_transfer(req)) { 893 if (!i2o_block_transfer(req)) {
899 blkdev_dequeue_request(req); 894 blk_start_request(req);
900 continue; 895 continue;
901 } else 896 } else
902 osm_info("transfer error\n"); 897 osm_info("transfer error\n");
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
922 blk_stop_queue(q); 917 blk_stop_queue(q);
923 break; 918 break;
924 } 919 }
925 } else 920 } else {
926 end_request(req, 0); 921 blk_start_request(req);
922 __blk_end_request_all(req, -EIO);
923 }
927 } 924 }
928}; 925};
929 926
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
1082 */ 1079 */
1083 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1080 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
1084 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1081 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
1085 blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); 1082 blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
1086 } else 1083 } else
1087 osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1084 osm_warn("unable to get blocksize of %s\n", gd->disk_name);
1088 1085
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b25e9b6516ae..98ffc41eaf2c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -243,7 +243,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
243 brq.mrq.cmd = &brq.cmd; 243 brq.mrq.cmd = &brq.cmd;
244 brq.mrq.data = &brq.data; 244 brq.mrq.data = &brq.data;
245 245
246 brq.cmd.arg = req->sector; 246 brq.cmd.arg = blk_rq_pos(req);
247 if (!mmc_card_blockaddr(card)) 247 if (!mmc_card_blockaddr(card))
248 brq.cmd.arg <<= 9; 248 brq.cmd.arg <<= 9;
249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
251 brq.stop.opcode = MMC_STOP_TRANSMISSION; 251 brq.stop.opcode = MMC_STOP_TRANSMISSION;
252 brq.stop.arg = 0; 252 brq.stop.arg = 0;
253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
254 brq.data.blocks = req->nr_sectors; 254 brq.data.blocks = blk_rq_sectors(req);
255 255
256 /* 256 /*
257 * The block layer doesn't support all sector count 257 * The block layer doesn't support all sector count
@@ -301,7 +301,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
301 * Adjust the sg list so it is the same size as the 301 * Adjust the sg list so it is the same size as the
302 * request. 302 * request.
303 */ 303 */
304 if (brq.data.blocks != req->nr_sectors) { 304 if (brq.data.blocks != blk_rq_sectors(req)) {
305 int i, data_size = brq.data.blocks << 9; 305 int i, data_size = brq.data.blocks << 9;
306 struct scatterlist *sg; 306 struct scatterlist *sg;
307 307
@@ -352,8 +352,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
352 printk(KERN_ERR "%s: error %d transferring data," 352 printk(KERN_ERR "%s: error %d transferring data,"
353 " sector %u, nr %u, card status %#x\n", 353 " sector %u, nr %u, card status %#x\n",
354 req->rq_disk->disk_name, brq.data.error, 354 req->rq_disk->disk_name, brq.data.error,
355 (unsigned)req->sector, 355 (unsigned)blk_rq_pos(req),
356 (unsigned)req->nr_sectors, status); 356 (unsigned)blk_rq_sectors(req), status);
357 } 357 }
358 358
359 if (brq.stop.error) { 359 if (brq.stop.error) {
@@ -521,7 +521,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
521 521
522 sprintf(md->disk->disk_name, "mmcblk%d", devidx); 522 sprintf(md->disk->disk_name, "mmcblk%d", devidx);
523 523
524 blk_queue_hardsect_size(md->queue.queue, 512); 524 blk_queue_logical_block_size(md->queue.queue, 512);
525 525
526 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 526 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
527 /* 527 /*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c67..49e582356c65 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
55 spin_lock_irq(q->queue_lock); 55 spin_lock_irq(q->queue_lock);
56 set_current_state(TASK_INTERRUPTIBLE); 56 set_current_state(TASK_INTERRUPTIBLE);
57 if (!blk_queue_plugged(q)) 57 if (!blk_queue_plugged(q))
58 req = elv_next_request(q); 58 req = blk_fetch_request(q);
59 mq->req = req; 59 mq->req = req;
60 spin_unlock_irq(q->queue_lock); 60 spin_unlock_irq(q->queue_lock);
61 61
@@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
88{ 88{
89 struct mmc_queue *mq = q->queuedata; 89 struct mmc_queue *mq = q->queuedata;
90 struct request *req; 90 struct request *req;
91 int ret;
92 91
93 if (!mq) { 92 if (!mq) {
94 printk(KERN_ERR "MMC: killing requests for dead queue\n"); 93 printk(KERN_ERR "MMC: killing requests for dead queue\n");
95 while ((req = elv_next_request(q)) != NULL) { 94 while ((req = blk_fetch_request(q)) != NULL)
96 do { 95 __blk_end_request_all(req, -EIO);
97 ret = __blk_end_request(req, -EIO,
98 blk_rq_cur_bytes(req));
99 } while (ret);
100 }
101 return; 96 return;
102 } 97 }
103 98
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb1..aaac3b6800b7 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
47 unsigned long block, nsect; 47 unsigned long block, nsect;
48 char *buf; 48 char *buf;
49 49
50 block = req->sector << 9 >> tr->blkshift; 50 block = blk_rq_pos(req) << 9 >> tr->blkshift;
51 nsect = req->current_nr_sectors << 9 >> tr->blkshift; 51 nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
52 52
53 buf = req->buffer; 53 buf = req->buffer;
54 54
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD) 56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return !tr->discard(dev, block, nsect); 57 return tr->discard(dev, block, nsect);
58 58
59 if (!blk_fs_request(req)) 59 if (!blk_fs_request(req))
60 return 0; 60 return -EIO;
61 61
62 if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) 62 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
63 return 0; 63 get_capacity(req->rq_disk))
64 return -EIO;
64 65
65 switch(rq_data_dir(req)) { 66 switch(rq_data_dir(req)) {
66 case READ: 67 case READ:
67 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 68 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
68 if (tr->readsect(dev, block, buf)) 69 if (tr->readsect(dev, block, buf))
69 return 0; 70 return -EIO;
70 return 1; 71 return 0;
71 72
72 case WRITE: 73 case WRITE:
73 if (!tr->writesect) 74 if (!tr->writesect)
74 return 0; 75 return -EIO;
75 76
76 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 77 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
77 if (tr->writesect(dev, block, buf)) 78 if (tr->writesect(dev, block, buf))
78 return 0; 79 return -EIO;
79 return 1; 80 return 0;
80 81
81 default: 82 default:
82 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); 83 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
83 return 0; 84 return -EIO;
84 } 85 }
85} 86}
86 87
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
88{ 89{
89 struct mtd_blktrans_ops *tr = arg; 90 struct mtd_blktrans_ops *tr = arg;
90 struct request_queue *rq = tr->blkcore_priv->rq; 91 struct request_queue *rq = tr->blkcore_priv->rq;
92 struct request *req = NULL;
91 93
92 /* we might get involved when memory gets low, so use PF_MEMALLOC */ 94 /* we might get involved when memory gets low, so use PF_MEMALLOC */
93 current->flags |= PF_MEMALLOC; 95 current->flags |= PF_MEMALLOC;
94 96
95 spin_lock_irq(rq->queue_lock); 97 spin_lock_irq(rq->queue_lock);
98
96 while (!kthread_should_stop()) { 99 while (!kthread_should_stop()) {
97 struct request *req;
98 struct mtd_blktrans_dev *dev; 100 struct mtd_blktrans_dev *dev;
99 int res = 0; 101 int res;
100
101 req = elv_next_request(rq);
102 102
103 if (!req) { 103 if (!req && !(req = blk_fetch_request(rq))) {
104 set_current_state(TASK_INTERRUPTIBLE); 104 set_current_state(TASK_INTERRUPTIBLE);
105 spin_unlock_irq(rq->queue_lock); 105 spin_unlock_irq(rq->queue_lock);
106 schedule(); 106 schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
119 119
120 spin_lock_irq(rq->queue_lock); 120 spin_lock_irq(rq->queue_lock);
121 121
122 end_request(req, res); 122 if (!__blk_end_request_cur(req, res))
123 req = NULL;
123 } 124 }
125
126 if (req)
127 __blk_end_request_all(req, -EIO);
128
124 spin_unlock_irq(rq->queue_lock); 129 spin_unlock_irq(rq->queue_lock);
125 130
126 return 0; 131 return 0;
@@ -373,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
373 } 378 }
374 379
375 tr->blkcore_priv->rq->queuedata = tr; 380 tr->blkcore_priv->rq->queuedata = tr;
376 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); 381 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
377 if (tr->discard) 382 if (tr->discard)
378 blk_queue_set_discard(tr->blkcore_priv->rq, 383 blk_queue_set_discard(tr->blkcore_priv->rq,
379 blktrans_discard_request); 384 blktrans_discard_request);
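
The mtd_blktrans_thread() rewrite above shows the partial-completion idiom: __blk_end_request_cur() completes only the current segment and returns true while the request still has bytes left, so the thread keeps the same request across iterations until it returns false, and do_blktrans_request() now returns 0/-errno instead of 1/0. A condensed, illustrative version of that loop shape (the do_segment callback is hypothetical, and the scheduling done when the queue is empty is omitted):

#include <linux/blkdev.h>

/* Sketch: service a queue one segment at a time, keeping the request
 * until __blk_end_request_cur() reports it fully completed.
 * Assumes q->queue_lock is held, matching the __ helpers used above. */
static void service_queue_example(struct request_queue *q,
                                  int (*do_segment)(struct request *req))
{
        struct request *req = NULL;

        while (req || (req = blk_fetch_request(q))) {
                int res = do_segment(req);      /* 0 on success, -errno on error */

                if (!__blk_end_request_cur(req, res))
                        req = NULL;             /* request fully completed */
        }
}
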
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d1815272c435..27a1be0cd4d4 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
603 if (dasd_profile_level != DASD_PROFILE_ON) 603 if (dasd_profile_level != DASD_PROFILE_ON)
604 return; 604 return;
605 605
606 sectors = req->nr_sectors; 606 sectors = blk_rq_sectors(req);
607 if (!cqr->buildclk || !cqr->startclk || 607 if (!cqr->buildclk || !cqr->startclk ||
608 !cqr->stopclk || !cqr->endclk || 608 !cqr->stopclk || !cqr->endclk ||
609 !sectors) 609 !sectors)
@@ -1614,15 +1614,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
1614} 1614}
1615 1615
1616/* 1616/*
1617 * posts the buffer_cache about a finalized request
1618 */
1619static inline void dasd_end_request(struct request *req, int error)
1620{
1621 if (__blk_end_request(req, error, blk_rq_bytes(req)))
1622 BUG();
1623}
1624
1625/*
1626 * Process finished error recovery ccw. 1617 * Process finished error recovery ccw.
1627 */ 1618 */
1628static inline void __dasd_block_process_erp(struct dasd_block *block, 1619static inline void __dasd_block_process_erp(struct dasd_block *block,
@@ -1665,18 +1656,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1665 if (basedev->state < DASD_STATE_READY) 1656 if (basedev->state < DASD_STATE_READY)
1666 return; 1657 return;
1667 /* Now we try to fetch requests from the request queue */ 1658 /* Now we try to fetch requests from the request queue */
1668 while (!blk_queue_plugged(queue) && 1659 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1669 elv_next_request(queue)) {
1670
1671 req = elv_next_request(queue);
1672
1673 if (basedev->features & DASD_FEATURE_READONLY && 1660 if (basedev->features & DASD_FEATURE_READONLY &&
1674 rq_data_dir(req) == WRITE) { 1661 rq_data_dir(req) == WRITE) {
1675 DBF_DEV_EVENT(DBF_ERR, basedev, 1662 DBF_DEV_EVENT(DBF_ERR, basedev,
1676 "Rejecting write request %p", 1663 "Rejecting write request %p",
1677 req); 1664 req);
1678 blkdev_dequeue_request(req); 1665 blk_start_request(req);
1679 dasd_end_request(req, -EIO); 1666 __blk_end_request_all(req, -EIO);
1680 continue; 1667 continue;
1681 } 1668 }
1682 cqr = basedev->discipline->build_cp(basedev, block, req); 1669 cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1704,8 +1691,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1704 "CCW creation failed (rc=%ld) " 1691 "CCW creation failed (rc=%ld) "
1705 "on request %p", 1692 "on request %p",
1706 PTR_ERR(cqr), req); 1693 PTR_ERR(cqr), req);
1707 blkdev_dequeue_request(req); 1694 blk_start_request(req);
1708 dasd_end_request(req, -EIO); 1695 __blk_end_request_all(req, -EIO);
1709 continue; 1696 continue;
1710 } 1697 }
1711 /* 1698 /*
@@ -1714,7 +1701,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1714 */ 1701 */
1715 cqr->callback_data = (void *) req; 1702 cqr->callback_data = (void *) req;
1716 cqr->status = DASD_CQR_FILLED; 1703 cqr->status = DASD_CQR_FILLED;
1717 blkdev_dequeue_request(req); 1704 blk_start_request(req);
1718 list_add_tail(&cqr->blocklist, &block->ccw_queue); 1705 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1719 dasd_profile_start(block, cqr, req); 1706 dasd_profile_start(block, cqr, req);
1720 } 1707 }
@@ -1731,7 +1718,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1731 status = cqr->block->base->discipline->free_cp(cqr, req); 1718 status = cqr->block->base->discipline->free_cp(cqr, req);
1732 if (status <= 0) 1719 if (status <= 0)
1733 error = status ? status : -EIO; 1720 error = status ? status : -EIO;
1734 dasd_end_request(req, error); 1721 __blk_end_request_all(req, error);
1735} 1722}
1736 1723
1737/* 1724/*
@@ -2003,7 +1990,7 @@ static void dasd_setup_queue(struct dasd_block *block)
2003{ 1990{
2004 int max; 1991 int max;
2005 1992
2006 blk_queue_hardsect_size(block->request_queue, block->bp_block); 1993 blk_queue_logical_block_size(block->request_queue, block->bp_block);
2007 max = block->base->discipline->max_blocks << block->s2b_shift; 1994 max = block->base->discipline->max_blocks << block->s2b_shift;
2008 blk_queue_max_sectors(block->request_queue, max); 1995 blk_queue_max_sectors(block->request_queue, max);
2009 blk_queue_max_phys_segments(block->request_queue, -1L); 1996 blk_queue_max_phys_segments(block->request_queue, -1L);
@@ -2038,10 +2025,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2038 return; 2025 return;
2039 2026
2040 spin_lock_irq(&block->request_queue_lock); 2027 spin_lock_irq(&block->request_queue_lock);
2041 while ((req = elv_next_request(block->request_queue))) { 2028 while ((req = blk_fetch_request(block->request_queue)))
2042 blkdev_dequeue_request(req); 2029 __blk_end_request_all(req, -EIO);
2043 dasd_end_request(req, -EIO);
2044 }
2045 spin_unlock_irq(&block->request_queue_lock); 2030 spin_unlock_irq(&block->request_queue_lock);
2046} 2031}
2047 2032
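
dasd.c keeps the two-step form: blk_peek_request() looks at the head of the queue without dequeueing, and blk_start_request() dequeues only once the driver has decided to take the request, mirroring the old elv_next_request()/blkdev_dequeue_request() pair. A hedged sketch of that pattern; the build callback stands in for build_cp() above and is not a real API:

#include <linux/blkdev.h>

/* Sketch: take requests off the queue only after they can be built;
 * requests that cannot be built are dequeued and failed immediately. */
static void accept_requests_example(struct request_queue *q,
                                    int (*build)(struct request *req))
{
        struct request *req;

        while ((req = blk_peek_request(q)) != NULL) {   /* look, don't dequeue */
                if (build(req) < 0) {
                        blk_start_request(req);           /* dequeue ... */
                        __blk_end_request_all(req, -EIO); /* ... and fail it */
                        continue;
                }
                blk_start_request(req); /* dequeue; completion comes later */
        }
}
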
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index b9a7f7733446..2efaddfae560 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -505,8 +505,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
505 return ERR_PTR(-EINVAL); 505 return ERR_PTR(-EINVAL);
506 blksize = block->bp_block; 506 blksize = block->bp_block;
507 /* Calculate record id of first and last block. */ 507 /* Calculate record id of first and last block. */
508 first_rec = req->sector >> block->s2b_shift; 508 first_rec = blk_rq_pos(req) >> block->s2b_shift;
509 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 509 last_rec =
510 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
510 /* Check struct bio and count the number of blocks for the request. */ 511 /* Check struct bio and count the number of blocks for the request. */
511 count = 0; 512 count = 0;
512 rq_for_each_segment(bv, req, iter) { 513 rq_for_each_segment(bv, req, iter) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb52da033f06..a41c94053e64 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2354,10 +2354,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2354 blksize = block->bp_block; 2354 blksize = block->bp_block;
2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2356 /* Calculate record id of first and last block. */ 2356 /* Calculate record id of first and last block. */
2357 first_rec = first_trk = req->sector >> block->s2b_shift; 2357 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2358 first_offs = sector_div(first_trk, blk_per_trk); 2358 first_offs = sector_div(first_trk, blk_per_trk);
2359 last_rec = last_trk = 2359 last_rec = last_trk =
2360 (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 2360 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2361 last_offs = sector_div(last_trk, blk_per_trk); 2361 last_offs = sector_div(last_trk, blk_per_trk);
2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2363 2363
@@ -2420,7 +2420,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2420 private = (struct dasd_eckd_private *) cqr->block->base->private; 2420 private = (struct dasd_eckd_private *) cqr->block->base->private;
2421 blksize = cqr->block->bp_block; 2421 blksize = cqr->block->bp_block;
2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2423 recid = req->sector >> cqr->block->s2b_shift; 2423 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2424 ccw = cqr->cpaddr; 2424 ccw = cqr->cpaddr;
2425 /* Skip over define extent & locate record. */ 2425 /* Skip over define extent & locate record. */
2426 ccw++; 2426 ccw++;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a3eb6fd14673..8912358daa2f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
270 return ERR_PTR(-EINVAL); 270 return ERR_PTR(-EINVAL);
271 blksize = block->bp_block; 271 blksize = block->bp_block;
272 /* Calculate record id of first and last block. */ 272 /* Calculate record id of first and last block. */
273 first_rec = req->sector >> block->s2b_shift; 273 first_rec = blk_rq_pos(req) >> block->s2b_shift;
274 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 274 last_rec =
275 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
275 /* Check struct bio and count the number of blocks for the request. */ 276 /* Check struct bio and count the number of blocks for the request. */
276 count = 0; 277 count = 0;
277 cidaw = 0; 278 cidaw = 0;
@@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
309 ccw = cqr->cpaddr; 310 ccw = cqr->cpaddr;
310 /* First ccw is define extent. */ 311 /* First ccw is define extent. */
311 define_extent(ccw++, cqr->data, rq_data_dir(req), 312 define_extent(ccw++, cqr->data, rq_data_dir(req),
312 block->bp_block, req->sector, req->nr_sectors); 313 block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
313 /* Build locate_record + read/write ccws. */ 314 /* Build locate_record + read/write ccws. */
314 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 315 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
315 LO_data = (struct LO_fba_data *) (idaws + cidaw); 316 LO_data = (struct LO_fba_data *) (idaws + cidaw);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index cfdcf1aed33c..a4c7ffcd9987 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -602,7 +602,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
602 dev_info->gd->private_data = dev_info; 602 dev_info->gd->private_data = dev_info;
603 dev_info->gd->driverfs_dev = &dev_info->dev; 603 dev_info->gd->driverfs_dev = &dev_info->dev;
604 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); 604 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
605 blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); 605 blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
606 606
607 seg_byte_size = (dev_info->end - dev_info->start + 1); 607 seg_byte_size = (dev_info->end - dev_info->start + 1);
608 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors 608 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 76814f3e898a..0ae0c83ef879 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -343,7 +343,7 @@ static int __init xpram_setup_blkdev(void)
343 goto out; 343 goto out;
344 } 344 }
345 blk_queue_make_request(xpram_queues[i], xpram_make_request); 345 blk_queue_make_request(xpram_queues[i], xpram_make_request);
346 blk_queue_hardsect_size(xpram_queues[i], 4096); 346 blk_queue_logical_block_size(xpram_queues[i], 4096);
347 } 347 }
348 348
349 /* 349 /*
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5f8e8ef43dd3..2d00a383a475 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1134 /* Setup ccws. */ 1134 /* Setup ccws. */
1135 request->op = TO_BLOCK; 1135 request->op = TO_BLOCK;
1136 start_block = (struct tape_34xx_block_id *) request->cpdata; 1136 start_block = (struct tape_34xx_block_id *) request->cpdata;
1137 start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B; 1137 start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
1138 DBF_EVENT(6, "start_block = %i\n", start_block->block); 1138 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1139 1139
1140 ccw = request->cpaddr; 1140 ccw = request->cpaddr;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 823b05bd0dd7..c453b2f3e9f4 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
633 struct req_iterator iter; 633 struct req_iterator iter;
634 634
635 DBF_EVENT(6, "xBREDid:"); 635 DBF_EVENT(6, "xBREDid:");
636 start_block = req->sector >> TAPEBLOCK_HSEC_S2B; 636 start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
637 DBF_EVENT(6, "start_block = %i\n", start_block); 637 DBF_EVENT(6, "start_block = %i\n", start_block);
638 638
639 rq_for_each_segment(bv, req, iter) 639 rq_for_each_segment(bv, req, iter)
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f32e89e7c4f2..47ff695255ea 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,13 +74,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static void 76static void
77tapeblock_end_request(struct request *req, int error)
78{
79 if (blk_end_request(req, error, blk_rq_bytes(req)))
80 BUG();
81}
82
83static void
84__tapeblock_end_request(struct tape_request *ccw_req, void *data) 77__tapeblock_end_request(struct tape_request *ccw_req, void *data)
85{ 78{
86 struct tape_device *device; 79 struct tape_device *device;
@@ -90,17 +83,17 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
90 83
91 device = ccw_req->device; 84 device = ccw_req->device;
92 req = (struct request *) data; 85 req = (struct request *) data;
93 tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO); 86 blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
94 if (ccw_req->rc == 0) 87 if (ccw_req->rc == 0)
95 /* Update position. */ 88 /* Update position. */
96 device->blk_data.block_position = 89 device->blk_data.block_position =
97 (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B; 90 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
98 else 91 else
99 /* We lost the position information due to an error. */ 92 /* We lost the position information due to an error. */
100 device->blk_data.block_position = -1; 93 device->blk_data.block_position = -1;
101 device->discipline->free_bread(ccw_req); 94 device->discipline->free_bread(ccw_req);
102 if (!list_empty(&device->req_queue) || 95 if (!list_empty(&device->req_queue) ||
103 elv_next_request(device->blk_data.request_queue)) 96 blk_peek_request(device->blk_data.request_queue))
104 tapeblock_trigger_requeue(device); 97 tapeblock_trigger_requeue(device);
105} 98}
106 99
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
118 ccw_req = device->discipline->bread(device, req); 111 ccw_req = device->discipline->bread(device, req);
119 if (IS_ERR(ccw_req)) { 112 if (IS_ERR(ccw_req)) {
120 DBF_EVENT(1, "TBLOCK: bread failed\n"); 113 DBF_EVENT(1, "TBLOCK: bread failed\n");
121 tapeblock_end_request(req, -EIO); 114 blk_end_request_all(req, -EIO);
122 return PTR_ERR(ccw_req); 115 return PTR_ERR(ccw_req);
123 } 116 }
124 ccw_req->callback = __tapeblock_end_request; 117 ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
131 * Start/enqueueing failed. No retries in 124 * Start/enqueueing failed. No retries in
132 * this case. 125 * this case.
133 */ 126 */
134 tapeblock_end_request(req, -EIO); 127 blk_end_request_all(req, -EIO);
135 device->discipline->free_bread(ccw_req); 128 device->discipline->free_bread(ccw_req);
136 } 129 }
137 130
@@ -169,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) {
169 spin_lock_irq(&device->blk_data.request_queue_lock); 162 spin_lock_irq(&device->blk_data.request_queue_lock);
170 while ( 163 while (
171 !blk_queue_plugged(queue) && 164 !blk_queue_plugged(queue) &&
172 elv_next_request(queue) && 165 (req = blk_fetch_request(queue)) &&
173 nr_queued < TAPEBLOCK_MIN_REQUEUE 166 nr_queued < TAPEBLOCK_MIN_REQUEUE
174 ) { 167 ) {
175 req = elv_next_request(queue);
176 if (rq_data_dir(req) == WRITE) { 168 if (rq_data_dir(req) == WRITE) {
177 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 169 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
178 blkdev_dequeue_request(req);
179 spin_unlock_irq(&device->blk_data.request_queue_lock); 170 spin_unlock_irq(&device->blk_data.request_queue_lock);
180 tapeblock_end_request(req, -EIO); 171 blk_end_request_all(req, -EIO);
181 spin_lock_irq(&device->blk_data.request_queue_lock); 172 spin_lock_irq(&device->blk_data.request_queue_lock);
182 continue; 173 continue;
183 } 174 }
184 blkdev_dequeue_request(req);
185 nr_queued++; 175 nr_queued++;
186 spin_unlock_irq(&device->blk_data.request_queue_lock); 176 spin_unlock_irq(&device->blk_data.request_queue_lock);
187 rc = tapeblock_start_request(device, req); 177 rc = tapeblock_start_request(device, req);
@@ -232,7 +222,7 @@ tapeblock_setup_device(struct tape_device * device)
232 if (rc) 222 if (rc)
233 goto cleanup_queue; 223 goto cleanup_queue;
234 224
235 blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); 225 blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
236 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); 226 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
237 blk_queue_max_phys_segments(blkdat->request_queue, -1L); 227 blk_queue_max_phys_segments(blkdat->request_queue, -1L);
238 blk_queue_max_hw_segments(blkdat->request_queue, -1L); 228 blk_queue_max_hw_segments(blkdat->request_queue, -1L);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a85ad05e8548..6d4651684688 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -186,31 +186,31 @@ static void jsfd_do_request(struct request_queue *q)
186{ 186{
187 struct request *req; 187 struct request *req;
188 188
189 while ((req = elv_next_request(q)) != NULL) { 189 req = blk_fetch_request(q);
190 while (req) {
190 struct jsfd_part *jdp = req->rq_disk->private_data; 191 struct jsfd_part *jdp = req->rq_disk->private_data;
191 unsigned long offset = req->sector << 9; 192 unsigned long offset = blk_rq_pos(req) << 9;
192 size_t len = req->current_nr_sectors << 9; 193 size_t len = blk_rq_cur_bytes(req);
194 int err = -EIO;
193 195
194 if ((offset + len) > jdp->dsize) { 196 if ((offset + len) > jdp->dsize)
195 end_request(req, 0); 197 goto end;
196 continue;
197 }
198 198
199 if (rq_data_dir(req) != READ) { 199 if (rq_data_dir(req) != READ) {
200 printk(KERN_ERR "jsfd: write\n"); 200 printk(KERN_ERR "jsfd: write\n");
201 end_request(req, 0); 201 goto end;
202 continue;
203 } 202 }
204 203
205 if ((jdp->dbase & 0xff000000) != 0x20000000) { 204 if ((jdp->dbase & 0xff000000) != 0x20000000) {
206 printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase); 205 printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
207 end_request(req, 0); 206 goto end;
208 continue;
209 } 207 }
210 208
211 jsfd_read(req->buffer, jdp->dbase + offset, len); 209 jsfd_read(req->buffer, jdp->dbase + offset, len);
212 210 err = 0;
213 end_request(req, 1); 211 end:
212 if (!__blk_end_request_cur(req, err))
213 req = blk_fetch_request(q);
214 } 214 }
215} 215}
216 216
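jsfd_do_request() above shows the companion pattern for drivers that complete requests piecemeal: the old end_request(req, uptodate) call becomes __blk_end_request_cur(req, err), which finishes only the current segment and returns false once the whole request is done, at which point the loop fetches the next one. Position and length now come from blk_rq_pos() and blk_rq_cur_bytes() instead of req->sector and req->current_nr_sectors. Roughly, with a hypothetical copy_segment() doing the actual data movement:

static void sketch_do_request(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req) {
		unsigned long offset = blk_rq_pos(req) << 9;	/* byte offset on the device */
		size_t len = blk_rq_cur_bytes(req);		/* bytes in the current segment */
		int err = copy_segment(req->buffer, offset, len) ? -EIO : 0;

		/* __blk_end_request_cur() returns false once the request is complete */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}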
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b5..c7076ce25e21 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
1825 if (linked_comm && SCpnt->device->queue_depth > 2 1825 if (linked_comm && SCpnt->device->queue_depth > 2
1826 && TLDEV(SCpnt->device->type)) { 1826 && TLDEV(SCpnt->device->type)) {
1827 ha->cp_stat[i] = READY; 1827 ha->cp_stat[i] = READY;
1828 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0); 1828 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
1829 return 0; 1829 return 0;
1830 } 1830 }
1831 1831
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2144 if (!cpp->din) 2144 if (!cpp->din)
2145 input_only = 0; 2145 input_only = 0;
2146 2146
2147 if (SCpnt->request->sector < minsec) 2147 if (blk_rq_pos(SCpnt->request) < minsec)
2148 minsec = SCpnt->request->sector; 2148 minsec = blk_rq_pos(SCpnt->request);
2149 if (SCpnt->request->sector > maxsec) 2149 if (blk_rq_pos(SCpnt->request) > maxsec)
2150 maxsec = SCpnt->request->sector; 2150 maxsec = blk_rq_pos(SCpnt->request);
2151 2151
2152 sl[n] = SCpnt->request->sector; 2152 sl[n] = blk_rq_pos(SCpnt->request);
2153 ioseek += SCpnt->request->nr_sectors; 2153 ioseek += blk_rq_sectors(SCpnt->request);
2154 2154
2155 if (!n) 2155 if (!n)
2156 continue; 2156 continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2190 k = il[n]; 2190 k = il[n];
2191 cpp = &ha->cp[k]; 2191 cpp = &ha->cp[k];
2192 SCpnt = cpp->SCpnt; 2192 SCpnt = cpp->SCpnt;
2193 ll[n] = SCpnt->request->nr_sectors; 2193 ll[n] = blk_rq_sectors(SCpnt->request);
2194 pl[n] = SCpnt->serial_number; 2194 pl[n] = SCpnt->serial_number;
2195 2195
2196 if (!n) 2196 if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2236 cpp = &ha->cp[k]; 2236 cpp = &ha->cp[k];
2237 SCpnt = cpp->SCpnt; 2237 SCpnt = cpp->SCpnt;
2238 scmd_printk(KERN_INFO, SCpnt, 2238 scmd_printk(KERN_INFO, SCpnt,
2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld" 2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2241 (ihdlr ? "ihdlr" : "qcomm"), 2241 (ihdlr ? "ihdlr" : "qcomm"),
2242 SCpnt->serial_number, k, flushcount, 2242 SCpnt->serial_number, k, flushcount,
2243 n_ready, SCpnt->request->sector, 2243 n_ready, blk_rq_pos(SCpnt->request),
2244 SCpnt->request->nr_sectors, cursec, YESNO(s), 2244 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2245 YESNO(r), YESNO(rev), YESNO(input_only), 2245 YESNO(r), YESNO(rev), YESNO(input_only),
2246 YESNO(overlap), cpp->din); 2246 YESNO(overlap), cpp->din);
2247 } 2247 }
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2408 2408
2409 if (linked_comm && SCpnt->device->queue_depth > 2 2409 if (linked_comm && SCpnt->device->queue_depth > 2
2410 && TLDEV(SCpnt->device->type)) 2410 && TLDEV(SCpnt->device->type))
2411 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1); 2411 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
2412 2412
2413 tstatus = status_byte(spp->target_status); 2413 tstatus = status_byte(spp->target_status);
2414 2414
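The eata.c changes are purely mechanical: every direct read of SCpnt->request->sector or ->nr_sectors becomes a call to blk_rq_pos() or blk_rq_sectors(), and the printk format strings switch from %ld to %u because blk_rq_sectors() returns an unsigned int (blk_rq_pos() returns a sector_t). In isolation the new accessors read like this (illustrative snippet, not taken from the patch):

	sector_t first   = blk_rq_pos(SCpnt->request);		/* starting 512-byte sector */
	unsigned int tot = blk_rq_sectors(SCpnt->request);	/* total sectors in the request */
	unsigned int cur = blk_rq_cur_sectors(SCpnt->request);	/* sectors in the current segment */

	printk(KERN_INFO "sec %llu ns %u cur %u\n",
	       (unsigned long long)first, tot, cur);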
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e436788..54fa1e42dc4d 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1927 /* do we need to support multiple segments? */ 1927 /* do we need to support multiple segments? */
1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1930 __func__, req->bio->bi_vcnt, req->data_len, 1930 __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1931 rsp->bio->bi_vcnt, rsp->data_len); 1931 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1932 return -EINVAL; 1932 return -EINVAL;
1933 } 1933 }
1934 1934
1935 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1935 ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
1936 bio_data(rsp->bio), rsp->data_len); 1936 bio_data(rsp->bio), blk_rq_bytes(rsp));
1937 if (ret > 0) { 1937 if (ret > 0) {
1938 /* positive number is the untransferred residual */ 1938 /* positive number is the untransferred residual */
1939 rsp->data_len = ret; 1939 rsp->resid_len = ret;
1940 req->data_len = 0; 1940 req->resid_len = 0;
1941 ret = 0; 1941 ret = 0;
1942 } else if (ret == 0) { 1942 } else if (ret == 0) {
1943 rsp->data_len = 0; 1943 rsp->resid_len = 0;
1944 req->data_len = 0; 1944 req->resid_len = 0;
1945 } 1945 }
1946 1946
1947 return ret; 1947 return ret;
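In the libsas SMP path the residual used to be written back into req->data_len and rsp->data_len, overloading the same field that held the transfer length. With the new layout, blk_rq_bytes() always gives the size of the request and resid_len carries only the untransferred remainder, so a transport handler reports a short response along these lines (sketch of the pattern established by the hunk above):

	ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
			       bio_data(rsp->bio), blk_rq_bytes(rsp));
	if (ret > 0) {
		/* positive return: bytes of the response left untransferred */
		rsp->resid_len = ret;
		req->resid_len = 0;
		ret = 0;
	}

Callers that need the byte count actually moved subtract the residual from the original length, or simply read resid_len back the way scsi_execute() and sg_rq_end_io() do further down in this patch.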
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48a..1bc3b7567994 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
134{ 134{
135 u8 *req_data = NULL, *resp_data = NULL, *buf; 135 u8 *req_data = NULL, *resp_data = NULL, *buf;
136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
137 int error = -EINVAL, resp_data_len = rsp->data_len; 137 int error = -EINVAL;
138 138
139 /* eight is the minimum size for request and response frames */ 139 /* eight is the minimum size for request and response frames */
140 if (req->data_len < 8 || rsp->data_len < 8) 140 if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
141 goto out; 141 goto out;
142 142
143 if (bio_offset(req->bio) + req->data_len > PAGE_SIZE || 143 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
144 bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) { 144 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
145 shost_printk(KERN_ERR, shost, 145 shost_printk(KERN_ERR, shost,
146 "SMP request/response frame crosses page boundary"); 146 "SMP request/response frame crosses page boundary");
147 goto out; 147 goto out;
148 } 148 }
149 149
150 req_data = kzalloc(req->data_len, GFP_KERNEL); 150 req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
151 151
152 /* make sure frame can always be built ... we copy 152 /* make sure frame can always be built ... we copy
153 * back only the requested length */ 153 * back only the requested length */
154 resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL); 154 resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
155 155
156 if (!req_data || !resp_data) { 156 if (!req_data || !resp_data) {
157 error = -ENOMEM; 157 error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
160 160
161 local_irq_disable(); 161 local_irq_disable();
162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
163 memcpy(req_data, buf, req->data_len); 163 memcpy(req_data, buf, blk_rq_bytes(req));
164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
165 local_irq_enable(); 165 local_irq_enable();
166 166
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
178 178
179 switch (req_data[1]) { 179 switch (req_data[1]) {
180 case SMP_REPORT_GENERAL: 180 case SMP_REPORT_GENERAL:
181 req->data_len -= 8; 181 req->resid_len -= 8;
182 resp_data_len -= 32; 182 rsp->resid_len -= 32;
183 resp_data[2] = SMP_RESP_FUNC_ACC; 183 resp_data[2] = SMP_RESP_FUNC_ACC;
184 resp_data[9] = sas_ha->num_phys; 184 resp_data[9] = sas_ha->num_phys;
185 break; 185 break;
186 186
187 case SMP_REPORT_MANUF_INFO: 187 case SMP_REPORT_MANUF_INFO:
188 req->data_len -= 8; 188 req->resid_len -= 8;
189 resp_data_len -= 64; 189 rsp->resid_len -= 64;
190 resp_data[2] = SMP_RESP_FUNC_ACC; 190 resp_data[2] = SMP_RESP_FUNC_ACC;
191 memcpy(resp_data + 12, shost->hostt->name, 191 memcpy(resp_data + 12, shost->hostt->name,
192 SAS_EXPANDER_VENDOR_ID_LEN); 192 SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
199 break; 199 break;
200 200
201 case SMP_DISCOVER: 201 case SMP_DISCOVER:
202 req->data_len -= 16; 202 req->resid_len -= 16;
203 if ((int)req->data_len < 0) { 203 if ((int)req->resid_len < 0) {
204 req->data_len = 0; 204 req->resid_len = 0;
205 error = -EINVAL; 205 error = -EINVAL;
206 goto out; 206 goto out;
207 } 207 }
208 resp_data_len -= 56; 208 rsp->resid_len -= 56;
209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]); 209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
210 break; 210 break;
211 211
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
215 break; 215 break;
216 216
217 case SMP_REPORT_PHY_SATA: 217 case SMP_REPORT_PHY_SATA:
218 req->data_len -= 16; 218 req->resid_len -= 16;
219 if ((int)req->data_len < 0) { 219 if ((int)req->resid_len < 0) {
220 req->data_len = 0; 220 req->resid_len = 0;
221 error = -EINVAL; 221 error = -EINVAL;
222 goto out; 222 goto out;
223 } 223 }
224 resp_data_len -= 60; 224 rsp->resid_len -= 60;
225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]); 225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
226 break; 226 break;
227 227
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
238 break; 238 break;
239 239
240 case SMP_PHY_CONTROL: 240 case SMP_PHY_CONTROL:
241 req->data_len -= 44; 241 req->resid_len -= 44;
242 if ((int)req->data_len < 0) { 242 if ((int)req->resid_len < 0) {
243 req->data_len = 0; 243 req->resid_len = 0;
244 error = -EINVAL; 244 error = -EINVAL;
245 goto out; 245 goto out;
246 } 246 }
247 resp_data_len -= 8; 247 rsp->resid_len -= 8;
248 sas_phy_control(sas_ha, req_data[9], req_data[10], 248 sas_phy_control(sas_ha, req_data[9], req_data[10],
249 req_data[32] >> 4, req_data[33] >> 4, 249 req_data[32] >> 4, req_data[33] >> 4,
250 resp_data); 250 resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
261 261
262 local_irq_disable(); 262 local_irq_disable();
263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
264 memcpy(buf, resp_data, rsp->data_len); 264 memcpy(buf, resp_data, blk_rq_bytes(rsp));
265 flush_kernel_dcache_page(bio_page(rsp->bio)); 265 flush_kernel_dcache_page(bio_page(rsp->bio));
266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
267 local_irq_enable(); 267 local_irq_enable();
268 rsp->data_len = resp_data_len;
269 268
270 out: 269 out:
271 kfree(req_data); 270 kfree(req_data);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c7..8032c5adb6a9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1312,10 +1312,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1312 uint32_t bgstat = bgf->bgstat; 1312 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0; 1313 uint64_t failing_sector = 0;
1314 1314
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1316 "bgstat=0x%x bghm=0x%x\n", 1316 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm); 1318 blk_rq_sectors(cmd->request), bgstat, bghm);
1319 1319
1320 spin_lock(&_dump_buf_lock); 1320 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) { 1321 if (!_dump_buf_done) {
@@ -2378,15 +2378,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2378 if (cmnd->cmnd[0] == READ_10) 2378 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, " 2380 "9035 BLKGRD: READ @ sector %llu, "
2381 "count %lu\n", 2381 "count %u\n",
2382 (unsigned long long)scsi_get_lba(cmnd), 2382 (unsigned long long)scsi_get_lba(cmnd),
2383 cmnd->request->nr_sectors); 2383 blk_rq_sectors(cmnd->request));
2384 else if (cmnd->cmnd[0] == WRITE_10) 2384 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, " 2386 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n", 2387 "count %u cmd=%p\n",
2388 (unsigned long long)scsi_get_lba(cmnd), 2388 (unsigned long long)scsi_get_lba(cmnd),
2389 cmnd->request->nr_sectors, 2389 blk_rq_sectors(cmnd->request),
2390 cmnd); 2390 cmnd);
2391 2391
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +2406,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2406 if (cmnd->cmnd[0] == READ_10) 2406 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, " 2408 "9040 dbg: READ @ sector %llu, "
2409 "count %lu\n", 2409 "count %u\n",
2410 (unsigned long long)scsi_get_lba(cmnd), 2410 (unsigned long long)scsi_get_lba(cmnd),
2411 cmnd->request->nr_sectors); 2411 blk_rq_sectors(cmnd->request));
2412 else if (cmnd->cmnd[0] == WRITE_10) 2412 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, " 2414 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n", 2415 "count %u cmd=%p\n",
2416 (unsigned long long)scsi_get_lba(cmnd), 2416 (unsigned long long)scsi_get_lba(cmnd),
2417 cmnd->request->nr_sectors, cmnd); 2417 blk_rq_sectors(cmnd->request), cmnd);
2418 else 2418 else
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n"); 2420 "9042 dbg: parser not implemented\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a0..5c65da519e39 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, " 1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt, 1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
1044 req->data_len, rsp->bio->bi_vcnt, rsp->data_len); 1044 blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1045 return -EINVAL; 1045 return -EINVAL;
1046 } 1046 }
1047 1047
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ?
1105 cpu_to_le64(rphy->identify.sas_address) : 1105 cpu_to_le64(rphy->identify.sas_address) :
1106 cpu_to_le64(ioc->sas_hba.sas_address); 1106 cpu_to_le64(ioc->sas_hba.sas_address);
1107 mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4); 1107 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1108 psge = &mpi_request->SGL; 1108 psge = &mpi_request->SGL;
1109 1109
1110 /* WRITE sgel first */ 1110 /* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1115 req->data_len, PCI_DMA_BIDIRECTIONAL); 1115 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1116 if (!dma_addr_out) { 1116 if (!dma_addr_out) {
1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1118 goto unmap; 1118 goto unmap;
1119 } 1119 }
1120 1120
1121 ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4), 1121 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
1122 dma_addr_out); 1122 dma_addr_out);
1123 1123
1124 /* incr sgel */ 1124 /* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1130 MPI2_SGE_FLAGS_END_OF_LIST); 1130 MPI2_SGE_FLAGS_END_OF_LIST);
1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1133 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1133 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1134 if (!dma_addr_in) { 1134 if (!dma_addr_in) {
1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1136 goto unmap; 1136 goto unmap;
1137 } 1137 }
1138 1138
1139 ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4), 1139 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
1140 dma_addr_in); 1140 dma_addr_in);
1141 1141
1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1170 1170
1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
1172 req->sense_len = sizeof(*mpi_reply); 1172 req->sense_len = sizeof(*mpi_reply);
1173 req->data_len = 0; 1173 req->resid_len = 0;
1174 rsp->data_len -= mpi_reply->ResponseDataLength; 1174 rsp->resid_len -= mpi_reply->ResponseDataLength;
1175
1176 } else { 1175 } else {
1177 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1176 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1178 "%s - no reply\n", ioc->name, __func__)); 1177 "%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1188 1187
1189 unmap: 1188 unmap:
1190 if (dma_addr_out) 1189 if (dma_addr_out)
1191 pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len, 1190 pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
1192 PCI_DMA_BIDIRECTIONAL); 1191 PCI_DMA_BIDIRECTIONAL);
1193 if (dma_addr_in) 1192 if (dma_addr_in)
1194 pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len, 1193 pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
1195 PCI_DMA_BIDIRECTIONAL); 1194 PCI_DMA_BIDIRECTIONAL);
1196 1195
1197 out: 1196 out:
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab2..5776b2ab6b12 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -889,26 +889,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
889} 889}
890EXPORT_SYMBOL(osd_req_add_set_attr_list); 890EXPORT_SYMBOL(osd_req_add_set_attr_list);
891 891
892static int _append_map_kern(struct request *req,
893 void *buff, unsigned len, gfp_t flags)
894{
895 struct bio *bio;
896 int ret;
897
898 bio = bio_map_kern(req->q, buff, len, flags);
899 if (IS_ERR(bio)) {
900 OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
901 PTR_ERR(bio));
902 return PTR_ERR(bio);
903 }
904 ret = blk_rq_append_bio(req->q, req, bio);
905 if (ret) {
906 OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
907 bio_put(bio);
908 }
909 return ret;
910}
911
912static int _req_append_segment(struct osd_request *or, 892static int _req_append_segment(struct osd_request *or,
913 unsigned padding, struct _osd_req_data_segment *seg, 893 unsigned padding, struct _osd_req_data_segment *seg,
914 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io) 894 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +904,14 @@ static int _req_append_segment(struct osd_request *or,
924 else 904 else
925 pad_buff = io->pad_buff; 905 pad_buff = io->pad_buff;
926 906
927 ret = _append_map_kern(io->req, pad_buff, padding, 907 ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
928 or->alloc_flags); 908 or->alloc_flags);
929 if (ret) 909 if (ret)
930 return ret; 910 return ret;
931 io->total_bytes += padding; 911 io->total_bytes += padding;
932 } 912 }
933 913
934 ret = _append_map_kern(io->req, seg->buff, seg->total_bytes, 914 ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
935 or->alloc_flags); 915 or->alloc_flags);
936 if (ret) 916 if (ret)
937 return ret; 917 return ret;
@@ -1293,6 +1273,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1293/* 1273/*
1294 * osd_finalize_request and helpers 1274 * osd_finalize_request and helpers
1295 */ 1275 */
1276static struct request *_make_request(struct request_queue *q, bool has_write,
1277 struct _osd_io_info *oii, gfp_t flags)
1278{
1279 if (oii->bio)
1280 return blk_make_request(q, oii->bio, flags);
1281 else {
1282 struct request *req;
1283
1284 req = blk_get_request(q, has_write ? WRITE : READ, flags);
1285 if (unlikely(!req))
1286 return ERR_PTR(-ENOMEM);
1287
1288 return req;
1289 }
1290}
1296 1291
1297static int _init_blk_request(struct osd_request *or, 1292static int _init_blk_request(struct osd_request *or,
1298 bool has_in, bool has_out) 1293 bool has_in, bool has_out)
@@ -1301,11 +1296,13 @@ static int _init_blk_request(struct osd_request *or,
1301 struct scsi_device *scsi_device = or->osd_dev->scsi_device; 1296 struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1302 struct request_queue *q = scsi_device->request_queue; 1297 struct request_queue *q = scsi_device->request_queue;
1303 struct request *req; 1298 struct request *req;
1304 int ret = -ENOMEM; 1299 int ret;
1305 1300
1306 req = blk_get_request(q, has_out, flags); 1301 req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1307 if (!req) 1302 if (IS_ERR(req)) {
1303 ret = PTR_ERR(req);
1308 goto out; 1304 goto out;
1305 }
1309 1306
1310 or->request = req; 1307 or->request = req;
1311 req->cmd_type = REQ_TYPE_BLOCK_PC; 1308 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1318,9 +1315,10 @@ static int _init_blk_request(struct osd_request *or,
1318 or->out.req = req; 1315 or->out.req = req;
1319 if (has_in) { 1316 if (has_in) {
1320 /* allocate bidi request */ 1317 /* allocate bidi request */
1321 req = blk_get_request(q, READ, flags); 1318 req = _make_request(q, false, &or->in, flags);
1322 if (!req) { 1319 if (IS_ERR(req)) {
1323 OSD_DEBUG("blk_get_request for bidi failed\n"); 1320 OSD_DEBUG("blk_get_request for bidi failed\n");
1321 ret = PTR_ERR(req);
1324 goto out; 1322 goto out;
1325 } 1323 }
1326 req->cmd_type = REQ_TYPE_BLOCK_PC; 1324 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1364,26 +1362,6 @@ int osd_finalize_request(struct osd_request *or,
1364 return ret; 1362 return ret;
1365 } 1363 }
1366 1364
1367 if (or->out.bio) {
1368 ret = blk_rq_append_bio(or->request->q, or->out.req,
1369 or->out.bio);
1370 if (ret) {
1371 OSD_DEBUG("blk_rq_append_bio out failed\n");
1372 return ret;
1373 }
1374 OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
1375 _LLU(or->out.total_bytes), or->out.req->data_len);
1376 }
1377 if (or->in.bio) {
1378 ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
1379 if (ret) {
1380 OSD_DEBUG("blk_rq_append_bio in failed\n");
1381 return ret;
1382 }
1383 OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
1384 _LLU(or->in.total_bytes), or->in.req->data_len);
1385 }
1386
1387 or->out.pad_buff = sg_out_pad_buffer; 1365 or->out.pad_buff = sg_out_pad_buffer;
1388 or->in.pad_buff = sg_in_pad_buffer; 1366 or->in.pad_buff = sg_in_pad_buffer;
1389 1367
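osd_initiator used to allocate a bare request with blk_get_request() and then graft its bios on with the now-removed _append_map_kern()/blk_rq_append_bio() calls; the new _make_request() helper instead hands an already-built bio chain to blk_make_request(), which allocates the request and attaches the data in one step. Note the differing failure conventions: blk_make_request() returns an ERR_PTR() value, while blk_get_request() still returns NULL. A condensed sketch of the call pattern (everything outside the block API is illustrative):

	struct request *req;

	if (bio)					/* payload present: wrap the bio chain */
		req = blk_make_request(q, bio, GFP_KERNEL);
	else						/* no payload: plain empty request */
		req = blk_get_request(q, has_write ? WRITE : READ, GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->cmd_type = REQ_TYPE_BLOCK_PC;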
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e98..dd3f9d2b99fd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
240 * is invalid. Prevent the garbage from being misinterpreted 240 * is invalid. Prevent the garbage from being misinterpreted
241 * and prevent security leaks by zeroing out the excess data. 241 * and prevent security leaks by zeroing out the excess data.
242 */ 242 */
243 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 243 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 244 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245 245
246 if (resid) 246 if (resid)
247 *resid = req->data_len; 247 *resid = req->resid_len;
248 ret = req->errors; 248 ret = req->errors;
249 out: 249 out:
250 blk_put_request(req); 250 blk_put_request(req);
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
546 * to queue the remainder of them. 546 * to queue the remainder of them.
547 */ 547 */
548 if (blk_end_request(req, error, bytes)) { 548 if (blk_end_request(req, error, bytes)) {
549 int leftover = (req->hard_nr_sectors << 9);
550
551 if (blk_pc_request(req))
552 leftover = req->data_len;
553
554 /* kill remainder if no retrys */ 549 /* kill remainder if no retrys */
555 if (error && scsi_noretry_cmd(cmd)) 550 if (error && scsi_noretry_cmd(cmd))
556 blk_end_request(req, error, leftover); 551 blk_end_request_all(req, error);
557 else { 552 else {
558 if (requeue) { 553 if (requeue) {
559 /* 554 /*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
673EXPORT_SYMBOL(scsi_release_buffers); 668EXPORT_SYMBOL(scsi_release_buffers);
674 669
675/* 670/*
676 * Bidi commands Must be complete as a whole, both sides at once.
677 * If part of the bytes were written and lld returned
678 * scsi_in()->resid and/or scsi_out()->resid this information will be left
679 * in req->data_len and req->next_rq->data_len. The upper-layer driver can
680 * decide what to do with this information.
681 */
682static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
683{
684 struct request *req = cmd->request;
685 unsigned int dlen = req->data_len;
686 unsigned int next_dlen = req->next_rq->data_len;
687
688 req->data_len = scsi_out(cmd)->resid;
689 req->next_rq->data_len = scsi_in(cmd)->resid;
690
691 /* The req and req->next_rq have not been completed */
692 BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
693
694 scsi_release_buffers(cmd);
695
696 /*
697 * This will goose the queue request function at the end, so we don't
698 * need to worry about launching another command.
699 */
700 scsi_next_command(cmd);
701}
702
703/*
704 * Function: scsi_io_completion() 671 * Function: scsi_io_completion()
705 * 672 *
706 * Purpose: Completion processing for block device I/O requests. 673 * Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
739void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 706void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
740{ 707{
741 int result = cmd->result; 708 int result = cmd->result;
742 int this_count;
743 struct request_queue *q = cmd->device->request_queue; 709 struct request_queue *q = cmd->device->request_queue;
744 struct request *req = cmd->request; 710 struct request *req = cmd->request;
745 int error = 0; 711 int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 if (!sense_deferred) 739 if (!sense_deferred)
774 error = -EIO; 740 error = -EIO;
775 } 741 }
742
743 req->resid_len = scsi_get_resid(cmd);
744
776 if (scsi_bidi_cmnd(cmd)) { 745 if (scsi_bidi_cmnd(cmd)) {
777 /* will also release_buffers */ 746 /*
778 scsi_end_bidi_request(cmd); 747 * Bidi commands Must be complete as a whole,
748 * both sides at once.
749 */
750 req->next_rq->resid_len = scsi_in(cmd)->resid;
751
752 blk_end_request_all(req, 0);
753
754 scsi_release_buffers(cmd);
755 scsi_next_command(cmd);
779 return; 756 return;
780 } 757 }
781 req->data_len = scsi_get_resid(cmd);
782 } 758 }
783 759
784 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 760 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
787 * Next deal with any sectors which we were able to correctly 763 * Next deal with any sectors which we were able to correctly
788 * handle. 764 * handle.
789 */ 765 */
790 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 766 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
791 "%d bytes done.\n", 767 "%d bytes done.\n",
792 req->nr_sectors, good_bytes)); 768 blk_rq_sectors(req), good_bytes));
793 769
794 /* 770 /*
795 * Recovered errors need reporting, but they're always treated 771 * Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
812 */ 788 */
813 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 789 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
814 return; 790 return;
815 this_count = blk_rq_bytes(req);
816 791
817 error = -EIO; 792 error = -EIO;
818 793
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
922 if (driver_byte(result) & DRIVER_SENSE) 897 if (driver_byte(result) & DRIVER_SENSE)
923 scsi_print_sense("", cmd); 898 scsi_print_sense("", cmd);
924 } 899 }
925 blk_end_request(req, -EIO, blk_rq_bytes(req)); 900 blk_end_request_all(req, -EIO);
926 scsi_next_command(cmd); 901 scsi_next_command(cmd);
927 break; 902 break;
928 case ACTION_REPREP: 903 case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
965 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 940 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
966 BUG_ON(count > sdb->table.nents); 941 BUG_ON(count > sdb->table.nents);
967 sdb->table.nents = count; 942 sdb->table.nents = count;
968 if (blk_pc_request(req)) 943 sdb->length = blk_rq_bytes(req);
969 sdb->length = req->data_len;
970 else
971 sdb->length = req->nr_sectors << 9;
972 return BLKPREP_OK; 944 return BLKPREP_OK;
973} 945}
974 946
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1087 if (unlikely(ret)) 1059 if (unlikely(ret))
1088 return ret; 1060 return ret;
1089 } else { 1061 } else {
1090 BUG_ON(req->data_len); 1062 BUG_ON(blk_rq_bytes(req));
1091 BUG_ON(req->data);
1092 1063
1093 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1064 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1094 req->buffer = NULL; 1065 req->buffer = NULL;
1095 } 1066 }
1096 1067
1097 cmd->cmd_len = req->cmd_len; 1068 cmd->cmd_len = req->cmd_len;
1098 if (!req->data_len) 1069 if (!blk_rq_bytes(req))
1099 cmd->sc_data_direction = DMA_NONE; 1070 cmd->sc_data_direction = DMA_NONE;
1100 else if (rq_data_dir(req) == WRITE) 1071 else if (rq_data_dir(req) == WRITE)
1101 cmd->sc_data_direction = DMA_TO_DEVICE; 1072 cmd->sc_data_direction = DMA_TO_DEVICE;
1102 else 1073 else
1103 cmd->sc_data_direction = DMA_FROM_DEVICE; 1074 cmd->sc_data_direction = DMA_FROM_DEVICE;
1104 1075
1105 cmd->transfersize = req->data_len; 1076 cmd->transfersize = blk_rq_bytes(req);
1106 cmd->allowed = req->retries; 1077 cmd->allowed = req->retries;
1107 return BLKPREP_OK; 1078 return BLKPREP_OK;
1108} 1079}
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1212 break; 1183 break;
1213 case BLKPREP_DEFER: 1184 case BLKPREP_DEFER:
1214 /* 1185 /*
1215 * If we defer, the elv_next_request() returns NULL, but the 1186 * If we defer, the blk_peek_request() returns NULL, but the
1216 * queue must be restarted, so we plug here if no returning 1187 * queue must be restarted, so we plug here if no returning
1217 * command will automatically do that. 1188 * command will automatically do that.
1218 */ 1189 */
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1388 struct scsi_target *starget = scsi_target(sdev); 1359 struct scsi_target *starget = scsi_target(sdev);
1389 struct Scsi_Host *shost = sdev->host; 1360 struct Scsi_Host *shost = sdev->host;
1390 1361
1391 blkdev_dequeue_request(req); 1362 blk_start_request(req);
1392 1363
1393 if (unlikely(cmd == NULL)) { 1364 if (unlikely(cmd == NULL)) {
1394 printk(KERN_CRIT "impossible request in %s.\n", 1365 printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
1480 1451
1481 if (!sdev) { 1452 if (!sdev) {
1482 printk("scsi: killing requests for dead queue\n"); 1453 printk("scsi: killing requests for dead queue\n");
1483 while ((req = elv_next_request(q)) != NULL) 1454 while ((req = blk_peek_request(q)) != NULL)
1484 scsi_kill_request(req, q); 1455 scsi_kill_request(req, q);
1485 return; 1456 return;
1486 } 1457 }
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
1501 * that the request is fully prepared even if we cannot 1472 * that the request is fully prepared even if we cannot
1502 * accept it. 1473 * accept it.
1503 */ 1474 */
1504 req = elv_next_request(q); 1475 req = blk_peek_request(q);
1505 if (!req || !scsi_dev_queue_ready(q, sdev)) 1476 if (!req || !scsi_dev_queue_ready(q, sdev))
1506 break; 1477 break;
1507 1478
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
1517 * Remove the request from the request list. 1488 * Remove the request from the request list.
1518 */ 1489 */
1519 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1490 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1520 blkdev_dequeue_request(req); 1491 blk_start_request(req);
1521 sdev->device_busy++; 1492 sdev->device_busy++;
1522 1493
1523 spin_unlock(q->queue_lock); 1494 spin_unlock(q->queue_lock);
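scsi_request_fn() is the one spot in this section that keeps the two-step form on purpose: it inspects the head of the queue with blk_peek_request(), checks whether the host and device can take another command, and only then dequeues with blk_start_request() (or lets blk_queue_start_tag() do the dequeue for tagged devices). Stripped down to the block-layer calls, with device_ready() and dispatch() as hypothetical stand-ins for the driver's own throttling and issue path:

static void sketch_scsi_style_request_fn(struct request_queue *q)
{
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);	/* look at the head, don't dequeue */
		if (!req || !device_ready(q))	/* hypothetical readiness check */
			break;

		blk_start_request(req);		/* dequeue and start the timeout */

		spin_unlock(q->queue_lock);
		dispatch(req);			/* hypothetical issue to the HBA */
		spin_lock(q->queue_lock);
	}
}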
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6a..10303272ba45 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the 387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
388 * length for us. 388 * length for us.
389 */ 389 */
390 cmd->sdb.length = rq->data_len; 390 cmd->sdb.length = blk_rq_bytes(rq);
391 391
392 return 0; 392 return 0;
393 393
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2d..d606452297cf 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
164 164
165 while (!blk_queue_plugged(q)) { 165 while (!blk_queue_plugged(q)) {
166 req = elv_next_request(q); 166 req = blk_fetch_request(q);
167 if (!req) 167 if (!req)
168 break; 168 break;
169 169
170 blkdev_dequeue_request(req);
171
172 spin_unlock_irq(q->queue_lock); 170 spin_unlock_irq(q->queue_lock);
173 171
174 handler = to_sas_internal(shost->transportt)->f->smp_handler; 172 handler = to_sas_internal(shost->transportt)->f->smp_handler;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b637..bcf3bd40bbd5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
384 struct scsi_device *sdp = q->queuedata; 384 struct scsi_device *sdp = q->queuedata;
385 struct gendisk *disk = rq->rq_disk; 385 struct gendisk *disk = rq->rq_disk;
386 struct scsi_disk *sdkp; 386 struct scsi_disk *sdkp;
387 sector_t block = rq->sector; 387 sector_t block = blk_rq_pos(rq);
388 sector_t threshold; 388 sector_t threshold;
389 unsigned int this_count = rq->nr_sectors; 389 unsigned int this_count = blk_rq_sectors(rq);
390 int ret, host_dif; 390 int ret, host_dif;
391 391
392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
413 this_count)); 413 this_count));
414 414
415 if (!sdp || !scsi_device_online(sdp) || 415 if (!sdp || !scsi_device_online(sdp) ||
416 block + rq->nr_sectors > get_capacity(disk)) { 416 block + blk_rq_sectors(rq) > get_capacity(disk)) {
417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
418 "Finishing %ld sectors\n", 418 "Finishing %u sectors\n",
419 rq->nr_sectors)); 419 blk_rq_sectors(rq)));
420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
421 "Retry with 0x%p\n", SCpnt)); 421 "Retry with 0x%p\n", SCpnt));
422 goto out; 422 goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
463 * for this. 463 * for this.
464 */ 464 */
465 if (sdp->sector_size == 1024) { 465 if (sdp->sector_size == 1024) {
466 if ((block & 1) || (rq->nr_sectors & 1)) { 466 if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
467 scmd_printk(KERN_ERR, SCpnt, 467 scmd_printk(KERN_ERR, SCpnt,
468 "Bad block number requested\n"); 468 "Bad block number requested\n");
469 goto out; 469 goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
473 } 473 }
474 } 474 }
475 if (sdp->sector_size == 2048) { 475 if (sdp->sector_size == 2048) {
476 if ((block & 3) || (rq->nr_sectors & 3)) { 476 if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
477 scmd_printk(KERN_ERR, SCpnt, 477 scmd_printk(KERN_ERR, SCpnt,
478 "Bad block number requested\n"); 478 "Bad block number requested\n");
479 goto out; 479 goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
483 } 483 }
484 } 484 }
485 if (sdp->sector_size == 4096) { 485 if (sdp->sector_size == 4096) {
486 if ((block & 7) || (rq->nr_sectors & 7)) { 486 if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
487 scmd_printk(KERN_ERR, SCpnt, 487 scmd_printk(KERN_ERR, SCpnt,
488 "Bad block number requested\n"); 488 "Bad block number requested\n");
489 goto out; 489 goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
512 } 512 }
513 513
514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
515 "%s %d/%ld 512 byte blocks.\n", 515 "%s %d/%u 512 byte blocks.\n",
516 (rq_data_dir(rq) == WRITE) ? 516 (rq_data_dir(rq) == WRITE) ?
517 "writing" : "reading", this_count, 517 "writing" : "reading", this_count,
518 rq->nr_sectors)); 518 blk_rq_sectors(rq)));
519 519
520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
971 971
972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
973{ 973{
974 u64 start_lba = scmd->request->sector; 974 u64 start_lba = blk_rq_pos(scmd->request);
975 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 975 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
976 u64 bad_lba; 976 u64 bad_lba;
977 int info_valid; 977 int info_valid;
978 978
@@ -1510,7 +1510,7 @@ got_data:
1510 */ 1510 */
1511 sector_size = 512; 1511 sector_size = 512;
1512 } 1512 }
1513 blk_queue_hardsect_size(sdp->request_queue, sector_size); 1513 blk_queue_logical_block_size(sdp->request_queue, sector_size);
1514 1514
1515 { 1515 {
1516 char cap_str_2[10], cap_str_10[10]; 1516 char cap_str_2[10], cap_str_10[10];
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff492797..82f14a9482d0 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
507 sector_sz = scmd->device->sector_size; 507 sector_sz = scmd->device->sector_size;
508 sectors = good_bytes / sector_sz; 508 sectors = good_bytes / sector_sz;
509 509
510 phys = scmd->request->sector & 0xffffffff; 510 phys = blk_rq_pos(scmd->request) & 0xffffffff;
511 if (sector_sz == 4096) 511 if (sector_sz == 4096)
512 phys >>= 3; 512 phys >>= 3;
513 513
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 91e316fe6522..8201387b4daa 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ 289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
290 sdp->sgdebug = 0; 290 sdp->sgdebug = 0;
291 q = sdp->device->request_queue; 291 q = sdp->device->request_queue;
292 sdp->sg_tablesize = min(q->max_hw_segments, 292 sdp->sg_tablesize = min(queue_max_hw_segments(q),
293 q->max_phys_segments); 293 queue_max_phys_segments(q));
294 } 294 }
295 if ((sfp = sg_add_sfp(sdp, dev))) 295 if ((sfp = sg_add_sfp(sdp, dev)))
296 filp->private_data = sfp; 296 filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
909 if (val < 0) 909 if (val < 0)
910 return -EINVAL; 910 return -EINVAL;
911 val = min_t(int, val, 911 val = min_t(int, val,
912 sdp->device->request_queue->max_sectors * 512); 912 queue_max_sectors(sdp->device->request_queue) * 512);
913 if (val != sfp->reserve.bufflen) { 913 if (val != sfp->reserve.bufflen) {
914 if (sg_res_in_use(sfp) || sfp->mmap_called) 914 if (sg_res_in_use(sfp) || sfp->mmap_called)
915 return -EBUSY; 915 return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
919 return 0; 919 return 0;
920 case SG_GET_RESERVED_SIZE: 920 case SG_GET_RESERVED_SIZE:
921 val = min_t(int, sfp->reserve.bufflen, 921 val = min_t(int, sfp->reserve.bufflen,
922 sdp->device->request_queue->max_sectors * 512); 922 queue_max_sectors(sdp->device->request_queue) * 512);
923 return put_user(val, ip); 923 return put_user(val, ip);
924 case SG_SET_COMMAND_Q: 924 case SG_SET_COMMAND_Q:
925 result = get_user(val, ip); 925 result = get_user(val, ip);
@@ -1059,7 +1059,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1059 return -ENODEV; 1059 return -ENODEV;
1060 return scsi_ioctl(sdp->device, cmd_in, p); 1060 return scsi_ioctl(sdp->device, cmd_in, p);
1061 case BLKSECTGET: 1061 case BLKSECTGET:
1062 return put_user(sdp->device->request_queue->max_sectors * 512, 1062 return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
1063 ip); 1063 ip);
1064 case BLKTRACESETUP: 1064 case BLKTRACESETUP:
1065 return blk_trace_setup(sdp->device->request_queue, 1065 return blk_trace_setup(sdp->device->request_queue,
@@ -1261,7 +1261,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1261 1261
1262 sense = rq->sense; 1262 sense = rq->sense;
1263 result = rq->errors; 1263 result = rq->errors;
1264 resid = rq->data_len; 1264 resid = rq->resid_len;
1265 1265
1266 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1266 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1267 sdp->disk->disk_name, srp->header.pack_id, result)); 1267 sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1378,7 +1378,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1378 sdp->device = scsidp; 1378 sdp->device = scsidp;
1379 INIT_LIST_HEAD(&sdp->sfds); 1379 INIT_LIST_HEAD(&sdp->sfds);
1380 init_waitqueue_head(&sdp->o_excl_wait); 1380 init_waitqueue_head(&sdp->o_excl_wait);
1381 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); 1381 sdp->sg_tablesize = min(queue_max_hw_segments(q),
1382 queue_max_phys_segments(q));
1382 sdp->index = k; 1383 sdp->index = k;
1383 kref_init(&sdp->d_ref); 1384 kref_init(&sdp->d_ref);
1384 1385
@@ -2056,7 +2057,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2056 sg_big_buff = def_reserved_size; 2057 sg_big_buff = def_reserved_size;
2057 2058
2058 bufflen = min_t(int, sg_big_buff, 2059 bufflen = min_t(int, sg_big_buff,
2059 sdp->device->request_queue->max_sectors * 512); 2060 queue_max_sectors(sdp->device->request_queue) * 512);
2060 sg_build_reserve(sfp, bufflen); 2061 sg_build_reserve(sfp, bufflen);
2061 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2062 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2062 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2063 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
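sg.c (and st.c just below) stop reading q->max_sectors, q->max_hw_segments and q->max_phys_segments directly and go through the queue_max_*() accessors instead, which is what allows the request-queue limits to be restructured later without another sweep over the drivers. The scatter-gather table sizing and reserved-buffer sizing become:

	sdp->sg_tablesize = min(queue_max_hw_segments(q),
				queue_max_phys_segments(q));

	bufflen = min_t(int, sg_big_buff,
			queue_max_sectors(sdp->device->request_queue) * 512);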
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad5..cd350dfc1216 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
292 if (cd->device->sector_size == 2048) 292 if (cd->device->sector_size == 2048)
293 error_sector <<= 2; 293 error_sector <<= 2;
294 error_sector &= ~(block_sectors - 1); 294 error_sector &= ~(block_sectors - 1);
295 good_bytes = (error_sector - SCpnt->request->sector) << 9; 295 good_bytes = (error_sector -
296 blk_rq_pos(SCpnt->request)) << 9;
296 if (good_bytes < 0 || good_bytes >= this_count) 297 if (good_bytes < 0 || good_bytes >= this_count)
297 good_bytes = 0; 298 good_bytes = 0;
298 /* 299 /*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
349 cd->disk->disk_name, block)); 350 cd->disk->disk_name, block));
350 351
351 if (!cd->device || !scsi_device_online(cd->device)) { 352 if (!cd->device || !scsi_device_online(cd->device)) {
352 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 353 SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
353 rq->nr_sectors)); 354 blk_rq_sectors(rq)));
354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 355 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
355 goto out; 356 goto out;
356 } 357 }
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
413 /* 414 /*
414 * request doesn't start on hw block boundary, add scatter pads 415 * request doesn't start on hw block boundary, add scatter pads
415 */ 416 */
416 if (((unsigned int)rq->sector % (s_size >> 9)) || 417 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
417 (scsi_bufflen(SCpnt) % s_size)) { 418 (scsi_bufflen(SCpnt) % s_size)) {
418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); 419 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
419 goto out; 420 goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); 423 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
423 424
424 425
425 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
426 cd->cdi.name, 427 cd->cdi.name,
427 (rq_data_dir(rq) == WRITE) ? 428 (rq_data_dir(rq) == WRITE) ?
428 "writing" : "reading", 429 "writing" : "reading",
429 this_count, rq->nr_sectors)); 430 this_count, blk_rq_sectors(rq)));
430 431
431 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
432 block = (unsigned int)rq->sector / (s_size >> 9); 433 block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
433 434
434 if (this_count > 0xffff) { 435 if (this_count > 0xffff) {
435 this_count = 0xffff; 436 this_count = 0xffff;
@@ -726,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd)
726 } 727 }
727 728
728 queue = cd->device->request_queue; 729 queue = cd->device->request_queue;
729 blk_queue_hardsect_size(queue, sector_size); 730 blk_queue_logical_block_size(queue, sector_size);
730 731
731 return; 732 return;
732} 733}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f14..89bd438e1fe3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
463 struct scsi_tape *STp = SRpnt->stp; 463 struct scsi_tape *STp = SRpnt->stp;
464 464
465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
466 STp->buffer->cmdstat.residual = req->data_len; 466 STp->buffer->cmdstat.residual = req->resid_len;
467 467
468 if (SRpnt->waiting) 468 if (SRpnt->waiting)
469 complete(SRpnt->waiting); 469 complete(SRpnt->waiting);
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
3983 return -ENODEV; 3983 return -ENODEV;
3984 } 3984 }
3985 3985
3986 i = min(SDp->request_queue->max_hw_segments, 3986 i = min(queue_max_hw_segments(SDp->request_queue),
3987 SDp->request_queue->max_phys_segments); 3987 queue_max_phys_segments(SDp->request_queue));
3988 if (st_max_sg_segs < i) 3988 if (st_max_sg_segs < i)
3989 i = st_max_sg_segs; 3989 i = st_max_sg_segs;
3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); 3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cbe..54023d41fd15 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
1306 if (linked_comm && SCpnt->device->queue_depth > 2 1306 if (linked_comm && SCpnt->device->queue_depth > 2
1307 && TLDEV(SCpnt->device->type)) { 1307 && TLDEV(SCpnt->device->type)) {
1308 HD(j)->cp_stat[i] = READY; 1308 HD(j)->cp_stat[i] = READY;
1309 flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE); 1309 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
1310 return 0; 1310 return 0;
1311 } 1311 }
1312 1312
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
1610 1610
1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE; 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
1612 1612
1613 if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector; 1613 if (blk_rq_pos(SCpnt->request) < minsec)
1614 if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector; 1614 minsec = blk_rq_pos(SCpnt->request);
1615 if (blk_rq_pos(SCpnt->request) > maxsec)
1616 maxsec = blk_rq_pos(SCpnt->request);
1615 1617
1616 sl[n] = SCpnt->request->sector; 1618 sl[n] = blk_rq_pos(SCpnt->request);
1617 ioseek += SCpnt->request->nr_sectors; 1619 ioseek += blk_rq_sectors(SCpnt->request);
1618 1620
1619 if (!n) continue; 1621 if (!n) continue;
1620 1622
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
1642 1644
1643 if (!input_only) for (n = 0; n < n_ready; n++) { 1645 if (!input_only) for (n = 0; n < n_ready; n++) {
1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1646 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1645 ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number; 1647 ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
1646 1648
1647 if (!n) continue; 1649 if (!n) continue;
1648 1650
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
1666 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1668 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1667 for (n = 0; n < n_ready; n++) { 1669 for (n = 0; n < n_ready; n++) {
1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1670 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1669 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\ 1671 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1672 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1671 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1673 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1674 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
1673 SCpnt->request->sector, SCpnt->request->nr_sectors, cursec, 1675 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1674 YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1676 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1675 YESNO(overlap), cpp->xdir); 1677 YESNO(overlap), cpp->xdir);
1676 } 1678 }
1677#endif 1679#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
1799 1801
1800 if (linked_comm && SCpnt->device->queue_depth > 2 1802 if (linked_comm && SCpnt->device->queue_depth > 2
1801 && TLDEV(SCpnt->device->type)) 1803 && TLDEV(SCpnt->device->type))
1802 flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE); 1804 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1803 1805
1804 tstatus = status_byte(spp->target_status); 1806 tstatus = status_byte(spp->target_status);
1805 1807
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 4ca3b5860643..cfa26d56ce60 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev)
132 132
133 if (us->fflags & US_FL_MAX_SECTORS_MIN) 133 if (us->fflags & US_FL_MAX_SECTORS_MIN)
134 max_sectors = PAGE_CACHE_SIZE >> 9; 134 max_sectors = PAGE_CACHE_SIZE >> 9;
135 if (sdev->request_queue->max_sectors > max_sectors) 135 if (queue_max_sectors(sdev->request_queue) > max_sectors)
136 blk_queue_max_sectors(sdev->request_queue, 136 blk_queue_max_sectors(sdev->request_queue,
137 max_sectors); 137 max_sectors);
138 } else if (sdev->type == TYPE_TAPE) { 138 } else if (sdev->type == TYPE_TAPE) {
@@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
483{ 483{
484 struct scsi_device *sdev = to_scsi_device(dev); 484 struct scsi_device *sdev = to_scsi_device(dev);
485 485
486 return sprintf(buf, "%u\n", sdev->request_queue->max_sectors); 486 return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
487} 487}
488 488
489/* Input routine for the sysfs max_sectors file */ 489/* Input routine for the sysfs max_sectors file */
diff --git a/fs/bio.c b/fs/bio.c
index 740699c4f90c..59000215e59b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -498,11 +498,11 @@ int bio_get_nr_vecs(struct block_device *bdev)
498 struct request_queue *q = bdev_get_queue(bdev); 498 struct request_queue *q = bdev_get_queue(bdev);
499 int nr_pages; 499 int nr_pages;
500 500
501 nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; 501 nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
502 if (nr_pages > q->max_phys_segments) 502 if (nr_pages > queue_max_phys_segments(q))
503 nr_pages = q->max_phys_segments; 503 nr_pages = queue_max_phys_segments(q);
504 if (nr_pages > q->max_hw_segments) 504 if (nr_pages > queue_max_hw_segments(q))
505 nr_pages = q->max_hw_segments; 505 nr_pages = queue_max_hw_segments(q);
506 506
507 return nr_pages; 507 return nr_pages;
508} 508}
@@ -561,8 +561,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
561 * make this too complex. 561 * make this too complex.
562 */ 562 */
563 563
564 while (bio->bi_phys_segments >= q->max_phys_segments 564 while (bio->bi_phys_segments >= queue_max_phys_segments(q)
565 || bio->bi_phys_segments >= q->max_hw_segments) { 565 || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
566 566
567 if (retried_segments) 567 if (retried_segments)
568 return 0; 568 return 0;
@@ -633,7 +633,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
633int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, 633int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
634 unsigned int len, unsigned int offset) 634 unsigned int len, unsigned int offset)
635{ 635{
636 return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); 636 return __bio_add_page(q, bio, page, len, offset,
637 queue_max_hw_sectors(q));
637} 638}
638 639
639/** 640/**
@@ -653,7 +654,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
653 unsigned int offset) 654 unsigned int offset)
654{ 655{
655 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 656 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
656 return __bio_add_page(q, bio, page, len, offset, q->max_sectors); 657 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
657} 658}
658 659
659struct bio_map_data { 660struct bio_map_data {
@@ -720,7 +721,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
720 721
721 while (bv_len && iov_idx < iov_count) { 722 while (bv_len && iov_idx < iov_count) {
722 unsigned int bytes; 723 unsigned int bytes;
723 char *iov_addr; 724 char __user *iov_addr;
724 725
725 bytes = min_t(unsigned int, 726 bytes = min_t(unsigned int,
726 iov[iov_idx].iov_len - iov_off, bv_len); 727 iov[iov_idx].iov_len - iov_off, bv_len);
@@ -1200,7 +1201,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
1200 char *addr = page_address(bvec->bv_page); 1201 char *addr = page_address(bvec->bv_page);
1201 int len = bmd->iovecs[i].bv_len; 1202 int len = bmd->iovecs[i].bv_len;
1202 1203
1203 if (read && !err) 1204 if (read)
1204 memcpy(p, addr, len); 1205 memcpy(p, addr, len);
1205 1206
1206 __free_page(bvec->bv_page); 1207 __free_page(bvec->bv_page);
@@ -1489,11 +1490,12 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
1489sector_t bio_sector_offset(struct bio *bio, unsigned short index, 1490sector_t bio_sector_offset(struct bio *bio, unsigned short index,
1490 unsigned int offset) 1491 unsigned int offset)
1491{ 1492{
1492 unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue); 1493 unsigned int sector_sz;
1493 struct bio_vec *bv; 1494 struct bio_vec *bv;
1494 sector_t sectors; 1495 sector_t sectors;
1495 int i; 1496 int i;
1496 1497
1498 sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
1497 sectors = 0; 1499 sectors = 0;
1498 1500
1499 if (index >= bio->bi_idx) 1501 if (index >= bio->bi_idx)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f45dbc18dd17..2dfc6cdcebbe 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -76,7 +76,7 @@ int set_blocksize(struct block_device *bdev, int size)
76 return -EINVAL; 76 return -EINVAL;
77 77
78 /* Size cannot be smaller than the size supported by the device */ 78 /* Size cannot be smaller than the size supported by the device */
79 if (size < bdev_hardsect_size(bdev)) 79 if (size < bdev_logical_block_size(bdev))
80 return -EINVAL; 80 return -EINVAL;
81 81
82 /* Don't change the size if it is same as current */ 82 /* Don't change the size if it is same as current */
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(sb_set_blocksize);
106 106
107int sb_min_blocksize(struct super_block *sb, int size) 107int sb_min_blocksize(struct super_block *sb, int size)
108{ 108{
109 int minsize = bdev_hardsect_size(sb->s_bdev); 109 int minsize = bdev_logical_block_size(sb->s_bdev);
110 if (size < minsize) 110 if (size < minsize)
111 size = minsize; 111 size = minsize;
112 return sb_set_blocksize(sb, size); 112 return sb_set_blocksize(sb, size);
@@ -1111,7 +1111,7 @@ EXPORT_SYMBOL(check_disk_change);
1111 1111
1112void bd_set_size(struct block_device *bdev, loff_t size) 1112void bd_set_size(struct block_device *bdev, loff_t size)
1113{ 1113{
1114 unsigned bsize = bdev_hardsect_size(bdev); 1114 unsigned bsize = bdev_logical_block_size(bdev);
1115 1115
1116 bdev->bd_inode->i_size = size; 1116 bdev->bd_inode->i_size = size;
1117 while (bsize < PAGE_CACHE_SIZE) { 1117 while (bsize < PAGE_CACHE_SIZE) {
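The bdev_hardsect_size() to bdev_logical_block_size() rename that runs through the filesystem hunks below is a naming fix: the value is the smallest addressable unit of the device. The companion getters added later in this patch (queue_physical_block_size(), queue_io_min()) expose the often larger preferred I/O granularity. A hedged sketch contrasting the three, using a hypothetical helper:

	#include <linux/blkdev.h>

	/* illustrative: a 4 KiB-native drive with 512-byte emulation would
	 * typically report logical=512, physical=4096, io_min=4096 */
	static void example_report_block_sizes(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		pr_info("logical %u physical %u io_min %u\n",
			bdev_logical_block_size(bdev),
			queue_physical_block_size(q),
			queue_io_min(q));
	}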
diff --git a/fs/buffer.c b/fs/buffer.c
index 1864d0b63088..a3ef091a45bd 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1085,12 +1085,12 @@ static struct buffer_head *
1085__getblk_slow(struct block_device *bdev, sector_t block, int size) 1085__getblk_slow(struct block_device *bdev, sector_t block, int size)
1086{ 1086{
1087 /* Size must be multiple of hard sectorsize */ 1087 /* Size must be multiple of hard sectorsize */
1088 if (unlikely(size & (bdev_hardsect_size(bdev)-1) || 1088 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1089 (size < 512 || size > PAGE_SIZE))) { 1089 (size < 512 || size > PAGE_SIZE))) {
1090 printk(KERN_ERR "getblk(): invalid block size %d requested\n", 1090 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1091 size); 1091 size);
1092 printk(KERN_ERR "hardsect size: %d\n", 1092 printk(KERN_ERR "logical block size: %d\n",
1093 bdev_hardsect_size(bdev)); 1093 bdev_logical_block_size(bdev));
1094 1094
1095 dump_stack(); 1095 dump_stack();
1096 return NULL; 1096 return NULL;
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 6a347fbc998a..ffd42815fda1 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -47,6 +47,8 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
47 struct pipe_inode_info *pipe, size_t count, 47 struct pipe_inode_info *pipe, size_t count,
48 unsigned int flags) 48 unsigned int flags)
49{ 49{
50 ssize_t (*splice_read)(struct file *, loff_t *,
51 struct pipe_inode_info *, size_t, unsigned int);
50 struct coda_file_info *cfi; 52 struct coda_file_info *cfi;
51 struct file *host_file; 53 struct file *host_file;
52 54
@@ -54,10 +56,11 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
54 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); 56 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
55 host_file = cfi->cfi_container; 57 host_file = cfi->cfi_container;
56 58
57 if (!host_file->f_op || !host_file->f_op->splice_read) 59 splice_read = host_file->f_op->splice_read;
58 return -EINVAL; 60 if (!splice_read)
61 splice_read = default_file_splice_read;
59 62
60 return host_file->f_op->splice_read(host_file, ppos, pipe, count,flags); 63 return splice_read(host_file, ppos, pipe, count, flags);
61} 64}
62 65
63static ssize_t 66static ssize_t
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 05763bbc2050..8b10b87dc01a 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1127,7 +1127,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1127 rw = WRITE_ODIRECT; 1127 rw = WRITE_ODIRECT;
1128 1128
1129 if (bdev) 1129 if (bdev)
1130 bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev)); 1130 bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
1131 1131
1132 if (offset & blocksize_mask) { 1132 if (offset & blocksize_mask) {
1133 if (bdev) 1133 if (bdev)
diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c
index b249ae97fb15..06ca92672eb5 100644
--- a/fs/exofs/osd.c
+++ b/fs/exofs/osd.c
@@ -50,10 +50,10 @@ int exofs_check_ok_resid(struct osd_request *or, u64 *in_resid, u64 *out_resid)
50 50
51 /* FIXME: should be include in osd_sense_info */ 51 /* FIXME: should be include in osd_sense_info */
52 if (in_resid) 52 if (in_resid)
53 *in_resid = or->in.req ? or->in.req->data_len : 0; 53 *in_resid = or->in.req ? or->in.req->resid_len : 0;
54 54
55 if (out_resid) 55 if (out_resid)
56 *out_resid = or->out.req ? or->out.req->data_len : 0; 56 *out_resid = or->out.req ? or->out.req->resid_len : 0;
57 57
58 return ret; 58 return ret;
59} 59}
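req->data_len is being split: the total length now lives in the internal __data_len (read via blk_rq_bytes()), and the residual byte count after completion moves to the new resid_len field, which is what callers such as this one want. A minimal passthrough sketch under that assumption (error handling omitted, helper name made up):

	#include <linux/blkdev.h>

	/* illustrative: issue an already-prepared passthrough request and
	 * report how many bytes were actually transferred */
	static unsigned int example_bytes_done(struct request_queue *q,
					       struct request *rq,
					       unsigned int total_len)
	{
		blk_execute_rq(q, NULL, rq, 0);
		return total_len - rq->resid_len;
	}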
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index d8b73d4abe3e..3c70d52afb10 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1696,7 +1696,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1696 goto failed_mount; 1696 goto failed_mount;
1697 } 1697 }
1698 1698
1699 hblock = bdev_hardsect_size(sb->s_bdev); 1699 hblock = bdev_logical_block_size(sb->s_bdev);
1700 if (sb->s_blocksize != blocksize) { 1700 if (sb->s_blocksize != blocksize) {
1701 /* 1701 /*
1702 * Make sure the blocksize for the filesystem is larger 1702 * Make sure the blocksize for the filesystem is larger
@@ -2120,7 +2120,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
2120 } 2120 }
2121 2121
2122 blocksize = sb->s_blocksize; 2122 blocksize = sb->s_blocksize;
2123 hblock = bdev_hardsect_size(bdev); 2123 hblock = bdev_logical_block_size(bdev);
2124 if (blocksize < hblock) { 2124 if (blocksize < hblock) {
2125 printk(KERN_ERR 2125 printk(KERN_ERR
2126 "EXT3-fs: blocksize too small for journal device.\n"); 2126 "EXT3-fs: blocksize too small for journal device.\n");
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c191d0f65fed..f016707597a7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3035,7 +3035,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
3035 } 3035 }
3036 3036
3037 blocksize = sb->s_blocksize; 3037 blocksize = sb->s_blocksize;
3038 hblock = bdev_hardsect_size(bdev); 3038 hblock = bdev_logical_block_size(bdev);
3039 if (blocksize < hblock) { 3039 if (blocksize < hblock) {
3040 ext4_msg(sb, KERN_ERR, 3040 ext4_msg(sb, KERN_ERR,
3041 "blocksize too small for journal device"); 3041 "blocksize too small for journal device");
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index f234aba36fb8..cc34f271b3e7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -525,11 +525,11 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
525 } 525 }
526 526
527 /* Set up the buffer cache and SB for real */ 527 /* Set up the buffer cache and SB for real */
528 if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) { 528 if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
529 ret = -EINVAL; 529 ret = -EINVAL;
530 fs_err(sdp, "FS block size (%u) is too small for device " 530 fs_err(sdp, "FS block size (%u) is too small for device "
531 "block size (%u)\n", 531 "block size (%u)\n",
532 sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev)); 532 sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
533 goto out; 533 goto out;
534 } 534 }
535 if (sdp->sd_sb.sb_bsize > PAGE_SIZE) { 535 if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 6122c7ee3648..de3239731db8 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -842,7 +842,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
842 struct super_block *sb = sdp->sd_vfs; 842 struct super_block *sb = sdp->sd_vfs;
843 struct block_device *bdev = sb->s_bdev; 843 struct block_device *bdev = sb->s_bdev;
844 const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / 844 const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
845 bdev_hardsect_size(sb->s_bdev); 845 bdev_logical_block_size(sb->s_bdev);
846 u64 blk; 846 u64 blk;
847 sector_t start = 0; 847 sector_t start = 0;
848 sector_t nr_sects = 0; 848 sector_t nr_sects = 0;
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 7f65b3be4aa9..a91f15b8673c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -515,7 +515,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
515 515
516 blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); 516 blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
517 if (sb->s_blocksize != blocksize) { 517 if (sb->s_blocksize != blocksize) {
518 int hw_blocksize = bdev_hardsect_size(sb->s_bdev); 518 int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
519 519
520 if (blocksize < hw_blocksize) { 520 if (blocksize < hw_blocksize) {
521 printk(KERN_ERR 521 printk(KERN_ERR
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index f76951dcd4a6..6aa7c4713536 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -25,7 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/blkdev.h> /* For bdev_hardsect_size(). */ 28#include <linux/blkdev.h> /* For bdev_logical_block_size(). */
29#include <linux/backing-dev.h> 29#include <linux/backing-dev.h>
30#include <linux/buffer_head.h> 30#include <linux/buffer_head.h>
31#include <linux/vfs.h> 31#include <linux/vfs.h>
@@ -2785,13 +2785,13 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
2785 goto err_out_now; 2785 goto err_out_now;
2786 2786
2787 /* We support sector sizes up to the PAGE_CACHE_SIZE. */ 2787 /* We support sector sizes up to the PAGE_CACHE_SIZE. */
2788 if (bdev_hardsect_size(sb->s_bdev) > PAGE_CACHE_SIZE) { 2788 if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
2789 if (!silent) 2789 if (!silent)
2790 ntfs_error(sb, "Device has unsupported sector size " 2790 ntfs_error(sb, "Device has unsupported sector size "
2791 "(%i). The maximum supported sector " 2791 "(%i). The maximum supported sector "
2792 "size on this architecture is %lu " 2792 "size on this architecture is %lu "
2793 "bytes.", 2793 "bytes.",
2794 bdev_hardsect_size(sb->s_bdev), 2794 bdev_logical_block_size(sb->s_bdev),
2795 PAGE_CACHE_SIZE); 2795 PAGE_CACHE_SIZE);
2796 goto err_out_now; 2796 goto err_out_now;
2797 } 2797 }
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 4f85eceab376..09cc25d04611 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1371,7 +1371,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
1371 1371
1372 bdevname(reg->hr_bdev, reg->hr_dev_name); 1372 bdevname(reg->hr_bdev, reg->hr_dev_name);
1373 1373
1374 sectsize = bdev_hardsect_size(reg->hr_bdev); 1374 sectsize = bdev_logical_block_size(reg->hr_bdev);
1375 if (sectsize != reg->hr_block_bytes) { 1375 if (sectsize != reg->hr_block_bytes) {
1376 mlog(ML_ERROR, 1376 mlog(ML_ERROR,
1377 "blocksize %u incorrect for device, expected %d", 1377 "blocksize %u incorrect for device, expected %d",
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 79ff8d9d37e0..5c6163f55039 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -713,7 +713,7 @@ static int ocfs2_sb_probe(struct super_block *sb,
713 *bh = NULL; 713 *bh = NULL;
714 714
715 /* may be > 512 */ 715 /* may be > 512 */
716 *sector_size = bdev_hardsect_size(sb->s_bdev); 716 *sector_size = bdev_logical_block_size(sb->s_bdev);
717 if (*sector_size > OCFS2_MAX_BLOCKSIZE) { 717 if (*sector_size > OCFS2_MAX_BLOCKSIZE) {
718 mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n", 718 mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n",
719 *sector_size, OCFS2_MAX_BLOCKSIZE); 719 *sector_size, OCFS2_MAX_BLOCKSIZE);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 99e33ef40be4..0af36085eb28 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -219,6 +219,13 @@ ssize_t part_size_show(struct device *dev,
219 return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); 219 return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
220} 220}
221 221
222ssize_t part_alignment_offset_show(struct device *dev,
223 struct device_attribute *attr, char *buf)
224{
225 struct hd_struct *p = dev_to_part(dev);
226 return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
227}
228
222ssize_t part_stat_show(struct device *dev, 229ssize_t part_stat_show(struct device *dev,
223 struct device_attribute *attr, char *buf) 230 struct device_attribute *attr, char *buf)
224{ 231{
@@ -272,6 +279,7 @@ ssize_t part_fail_store(struct device *dev,
272static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); 279static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
273static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); 280static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
274static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); 281static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
282static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
275static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 283static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
276#ifdef CONFIG_FAIL_MAKE_REQUEST 284#ifdef CONFIG_FAIL_MAKE_REQUEST
277static struct device_attribute dev_attr_fail = 285static struct device_attribute dev_attr_fail =
@@ -282,6 +290,7 @@ static struct attribute *part_attrs[] = {
282 &dev_attr_partition.attr, 290 &dev_attr_partition.attr,
283 &dev_attr_start.attr, 291 &dev_attr_start.attr,
284 &dev_attr_size.attr, 292 &dev_attr_size.attr,
293 &dev_attr_alignment_offset.attr,
285 &dev_attr_stat.attr, 294 &dev_attr_stat.attr,
286#ifdef CONFIG_FAIL_MAKE_REQUEST 295#ifdef CONFIG_FAIL_MAKE_REQUEST
287 &dev_attr_fail.attr, 296 &dev_attr_fail.attr,
@@ -383,6 +392,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
383 pdev = part_to_dev(p); 392 pdev = part_to_dev(p);
384 393
385 p->start_sect = start; 394 p->start_sect = start;
395 p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
386 p->nr_sects = len; 396 p->nr_sects = len;
387 p->partno = partno; 397 p->partno = partno;
388 p->policy = get_disk_ro(disk); 398 p->policy = get_disk_ro(disk);
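The new alignment_offset attribute is filled in from the partition's start sector with queue_sector_alignment_offset(), defined later in this patch as ((sector << 9) - alignment_offset) & (io_min - 1). As a worked example, assuming a device with io_min = 4096 and a zero device-level alignment offset: a partition starting at the traditional sector 63 begins 63 * 512 = 32256 bytes into the disk, and 32256 & 4095 = 3584, so sysfs would report an alignment_offset of 3584 bytes past the preceding 4 KiB boundary.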
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index 46297683cd34..fc71aab08460 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -76,7 +76,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
76 Sector sect; 76 Sector sect;
77 77
78 res = 0; 78 res = 0;
79 blocksize = bdev_hardsect_size(bdev); 79 blocksize = bdev_logical_block_size(bdev);
80 if (blocksize <= 0) 80 if (blocksize <= 0)
81 goto out_exit; 81 goto out_exit;
82 i_size = i_size_read(bdev->bd_inode); 82 i_size = i_size_read(bdev->bd_inode);
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 796511886f28..0028d2ef0662 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -110,7 +110,7 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
110 Sector sect; 110 Sector sect;
111 unsigned char *data; 111 unsigned char *data;
112 u32 this_sector, this_size; 112 u32 this_sector, this_size;
113 int sector_size = bdev_hardsect_size(bdev) / 512; 113 int sector_size = bdev_logical_block_size(bdev) / 512;
114 int loopct = 0; /* number of links followed 114 int loopct = 0; /* number of links followed
115 without finding a data partition */ 115 without finding a data partition */
116 int i; 116 int i;
@@ -415,7 +415,7 @@ static struct {
415 415
416int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) 416int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
417{ 417{
418 int sector_size = bdev_hardsect_size(bdev) / 512; 418 int sector_size = bdev_logical_block_size(bdev) / 512;
419 Sector sect; 419 Sector sect;
420 unsigned char *data; 420 unsigned char *data;
421 struct partition *p; 421 struct partition *p;
diff --git a/fs/pipe.c b/fs/pipe.c
index 13414ec45b8d..f7dd21ad85a6 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -302,6 +302,20 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *info,
302 return 0; 302 return 0;
303} 303}
304 304
305/**
306 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
307 * @pipe: the pipe that the buffer belongs to
308 * @buf: the buffer to put a reference to
309 *
310 * Description:
311 * This function releases a reference to @buf.
312 */
313void generic_pipe_buf_release(struct pipe_inode_info *pipe,
314 struct pipe_buffer *buf)
315{
316 page_cache_release(buf->page);
317}
318
305static const struct pipe_buf_operations anon_pipe_buf_ops = { 319static const struct pipe_buf_operations anon_pipe_buf_ops = {
306 .can_merge = 1, 320 .can_merge = 1,
307 .map = generic_pipe_buf_map, 321 .map = generic_pipe_buf_map,
diff --git a/fs/read_write.c b/fs/read_write.c
index 9d1e76bb9ee1..6c8c55dec2bc 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -805,12 +805,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
805 goto out; 805 goto out;
806 if (!(in_file->f_mode & FMODE_READ)) 806 if (!(in_file->f_mode & FMODE_READ))
807 goto fput_in; 807 goto fput_in;
808 retval = -EINVAL;
809 in_inode = in_file->f_path.dentry->d_inode;
810 if (!in_inode)
811 goto fput_in;
812 if (!in_file->f_op || !in_file->f_op->splice_read)
813 goto fput_in;
814 retval = -ESPIPE; 808 retval = -ESPIPE;
815 if (!ppos) 809 if (!ppos)
816 ppos = &in_file->f_pos; 810 ppos = &in_file->f_pos;
@@ -834,6 +828,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
834 retval = -EINVAL; 828 retval = -EINVAL;
835 if (!out_file->f_op || !out_file->f_op->sendpage) 829 if (!out_file->f_op || !out_file->f_op->sendpage)
836 goto fput_out; 830 goto fput_out;
831 in_inode = in_file->f_path.dentry->d_inode;
837 out_inode = out_file->f_path.dentry->d_inode; 832 out_inode = out_file->f_path.dentry->d_inode;
838 retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count); 833 retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
839 if (retval < 0) 834 if (retval < 0)
diff --git a/fs/splice.c b/fs/splice.c
index 666953d59a35..73766d24f97b 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -507,9 +507,131 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
507 507
508 return ret; 508 return ret;
509} 509}
510
511EXPORT_SYMBOL(generic_file_splice_read); 510EXPORT_SYMBOL(generic_file_splice_read);
512 511
512static const struct pipe_buf_operations default_pipe_buf_ops = {
513 .can_merge = 0,
514 .map = generic_pipe_buf_map,
515 .unmap = generic_pipe_buf_unmap,
516 .confirm = generic_pipe_buf_confirm,
517 .release = generic_pipe_buf_release,
518 .steal = generic_pipe_buf_steal,
519 .get = generic_pipe_buf_get,
520};
521
522static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
523 unsigned long vlen, loff_t offset)
524{
525 mm_segment_t old_fs;
526 loff_t pos = offset;
527 ssize_t res;
528
529 old_fs = get_fs();
530 set_fs(get_ds());
531 /* The cast to a user pointer is valid due to the set_fs() */
532 res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
533 set_fs(old_fs);
534
535 return res;
536}
537
538static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
539 loff_t pos)
540{
541 mm_segment_t old_fs;
542 ssize_t res;
543
544 old_fs = get_fs();
545 set_fs(get_ds());
546 /* The cast to a user pointer is valid due to the set_fs() */
547 res = vfs_write(file, (const char __user *)buf, count, &pos);
548 set_fs(old_fs);
549
550 return res;
551}
552
553ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
554 struct pipe_inode_info *pipe, size_t len,
555 unsigned int flags)
556{
557 unsigned int nr_pages;
558 unsigned int nr_freed;
559 size_t offset;
560 struct page *pages[PIPE_BUFFERS];
561 struct partial_page partial[PIPE_BUFFERS];
562 struct iovec vec[PIPE_BUFFERS];
563 pgoff_t index;
564 ssize_t res;
565 size_t this_len;
566 int error;
567 int i;
568 struct splice_pipe_desc spd = {
569 .pages = pages,
570 .partial = partial,
571 .flags = flags,
572 .ops = &default_pipe_buf_ops,
573 .spd_release = spd_release_page,
574 };
575
576 index = *ppos >> PAGE_CACHE_SHIFT;
577 offset = *ppos & ~PAGE_CACHE_MASK;
578 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
579
580 for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) {
581 struct page *page;
582
583 page = alloc_page(GFP_USER);
584 error = -ENOMEM;
585 if (!page)
586 goto err;
587
588 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
589 vec[i].iov_base = (void __user *) page_address(page);
590 vec[i].iov_len = this_len;
591 pages[i] = page;
592 spd.nr_pages++;
593 len -= this_len;
594 offset = 0;
595 }
596
597 res = kernel_readv(in, vec, spd.nr_pages, *ppos);
598 if (res < 0) {
599 error = res;
600 goto err;
601 }
602
603 error = 0;
604 if (!res)
605 goto err;
606
607 nr_freed = 0;
608 for (i = 0; i < spd.nr_pages; i++) {
609 this_len = min_t(size_t, vec[i].iov_len, res);
610 partial[i].offset = 0;
611 partial[i].len = this_len;
612 if (!this_len) {
613 __free_page(pages[i]);
614 pages[i] = NULL;
615 nr_freed++;
616 }
617 res -= this_len;
618 }
619 spd.nr_pages -= nr_freed;
620
621 res = splice_to_pipe(pipe, &spd);
622 if (res > 0)
623 *ppos += res;
624
625 return res;
626
627err:
628 for (i = 0; i < spd.nr_pages; i++)
629 __free_page(pages[i]);
630
631 return error;
632}
633EXPORT_SYMBOL(default_file_splice_read);
634
513/* 635/*
514 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' 636 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
515 * using sendpage(). Return the number of bytes sent. 637 * using sendpage(). Return the number of bytes sent.
@@ -881,6 +1003,36 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
881 1003
882EXPORT_SYMBOL(generic_file_splice_write); 1004EXPORT_SYMBOL(generic_file_splice_write);
883 1005
1006static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
1007 struct splice_desc *sd)
1008{
1009 int ret;
1010 void *data;
1011
1012 ret = buf->ops->confirm(pipe, buf);
1013 if (ret)
1014 return ret;
1015
1016 data = buf->ops->map(pipe, buf, 0);
1017 ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
1018 buf->ops->unmap(pipe, buf, data);
1019
1020 return ret;
1021}
1022
1023static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
1024 struct file *out, loff_t *ppos,
1025 size_t len, unsigned int flags)
1026{
1027 ssize_t ret;
1028
1029 ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
1030 if (ret > 0)
1031 *ppos += ret;
1032
1033 return ret;
1034}
1035
884/** 1036/**
885 * generic_splice_sendpage - splice data from a pipe to a socket 1037 * generic_splice_sendpage - splice data from a pipe to a socket
886 * @pipe: pipe to splice from 1038 * @pipe: pipe to splice from
@@ -908,11 +1060,10 @@ EXPORT_SYMBOL(generic_splice_sendpage);
908static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, 1060static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
909 loff_t *ppos, size_t len, unsigned int flags) 1061 loff_t *ppos, size_t len, unsigned int flags)
910{ 1062{
1063 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
1064 loff_t *, size_t, unsigned int);
911 int ret; 1065 int ret;
912 1066
913 if (unlikely(!out->f_op || !out->f_op->splice_write))
914 return -EINVAL;
915
916 if (unlikely(!(out->f_mode & FMODE_WRITE))) 1067 if (unlikely(!(out->f_mode & FMODE_WRITE)))
917 return -EBADF; 1068 return -EBADF;
918 1069
@@ -923,7 +1074,11 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
923 if (unlikely(ret < 0)) 1074 if (unlikely(ret < 0))
924 return ret; 1075 return ret;
925 1076
926 return out->f_op->splice_write(pipe, out, ppos, len, flags); 1077 splice_write = out->f_op->splice_write;
1078 if (!splice_write)
1079 splice_write = default_file_splice_write;
1080
1081 return splice_write(pipe, out, ppos, len, flags);
927} 1082}
928 1083
929/* 1084/*
@@ -933,11 +1088,10 @@ static long do_splice_to(struct file *in, loff_t *ppos,
933 struct pipe_inode_info *pipe, size_t len, 1088 struct pipe_inode_info *pipe, size_t len,
934 unsigned int flags) 1089 unsigned int flags)
935{ 1090{
1091 ssize_t (*splice_read)(struct file *, loff_t *,
1092 struct pipe_inode_info *, size_t, unsigned int);
936 int ret; 1093 int ret;
937 1094
938 if (unlikely(!in->f_op || !in->f_op->splice_read))
939 return -EINVAL;
940
941 if (unlikely(!(in->f_mode & FMODE_READ))) 1095 if (unlikely(!(in->f_mode & FMODE_READ)))
942 return -EBADF; 1096 return -EBADF;
943 1097
@@ -945,7 +1099,11 @@ static long do_splice_to(struct file *in, loff_t *ppos,
945 if (unlikely(ret < 0)) 1099 if (unlikely(ret < 0))
946 return ret; 1100 return ret;
947 1101
948 return in->f_op->splice_read(in, ppos, pipe, len, flags); 1102 splice_read = in->f_op->splice_read;
1103 if (!splice_read)
1104 splice_read = default_file_splice_read;
1105
1106 return splice_read(in, ppos, pipe, len, flags);
949} 1107}
950 1108
951/** 1109/**
@@ -1112,6 +1270,9 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1112 return ret; 1270 return ret;
1113} 1271}
1114 1272
1273static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1274 struct pipe_inode_info *opipe,
1275 size_t len, unsigned int flags);
1115/* 1276/*
1116 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same 1277 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1117 * location, so checking ->i_pipe is not enough to verify that this is a 1278 * location, so checking ->i_pipe is not enough to verify that this is a
@@ -1132,12 +1293,32 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1132 struct file *out, loff_t __user *off_out, 1293 struct file *out, loff_t __user *off_out,
1133 size_t len, unsigned int flags) 1294 size_t len, unsigned int flags)
1134{ 1295{
1135 struct pipe_inode_info *pipe; 1296 struct pipe_inode_info *ipipe;
1297 struct pipe_inode_info *opipe;
1136 loff_t offset, *off; 1298 loff_t offset, *off;
1137 long ret; 1299 long ret;
1138 1300
1139 pipe = pipe_info(in->f_path.dentry->d_inode); 1301 ipipe = pipe_info(in->f_path.dentry->d_inode);
1140 if (pipe) { 1302 opipe = pipe_info(out->f_path.dentry->d_inode);
1303
1304 if (ipipe && opipe) {
1305 if (off_in || off_out)
1306 return -ESPIPE;
1307
1308 if (!(in->f_mode & FMODE_READ))
1309 return -EBADF;
1310
1311 if (!(out->f_mode & FMODE_WRITE))
1312 return -EBADF;
1313
1314 /* Splicing to self would be fun, but... */
1315 if (ipipe == opipe)
1316 return -EINVAL;
1317
1318 return splice_pipe_to_pipe(ipipe, opipe, len, flags);
1319 }
1320
1321 if (ipipe) {
1141 if (off_in) 1322 if (off_in)
1142 return -ESPIPE; 1323 return -ESPIPE;
1143 if (off_out) { 1324 if (off_out) {
@@ -1149,7 +1330,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1149 } else 1330 } else
1150 off = &out->f_pos; 1331 off = &out->f_pos;
1151 1332
1152 ret = do_splice_from(pipe, out, off, len, flags); 1333 ret = do_splice_from(ipipe, out, off, len, flags);
1153 1334
1154 if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) 1335 if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
1155 ret = -EFAULT; 1336 ret = -EFAULT;
@@ -1157,8 +1338,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1157 return ret; 1338 return ret;
1158 } 1339 }
1159 1340
1160 pipe = pipe_info(out->f_path.dentry->d_inode); 1341 if (opipe) {
1161 if (pipe) {
1162 if (off_out) 1342 if (off_out)
1163 return -ESPIPE; 1343 return -ESPIPE;
1164 if (off_in) { 1344 if (off_in) {
@@ -1170,7 +1350,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1170 } else 1350 } else
1171 off = &in->f_pos; 1351 off = &in->f_pos;
1172 1352
1173 ret = do_splice_to(in, off, pipe, len, flags); 1353 ret = do_splice_to(in, off, opipe, len, flags);
1174 1354
1175 if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) 1355 if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
1176 ret = -EFAULT; 1356 ret = -EFAULT;
@@ -1511,7 +1691,7 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
1511 * Make sure there's data to read. Wait for input if we can, otherwise 1691 * Make sure there's data to read. Wait for input if we can, otherwise
1512 * return an appropriate error. 1692 * return an appropriate error.
1513 */ 1693 */
1514static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1694static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1515{ 1695{
1516 int ret; 1696 int ret;
1517 1697
@@ -1549,7 +1729,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1549 * Make sure there's writeable room. Wait for room if we can, otherwise 1729 * Make sure there's writeable room. Wait for room if we can, otherwise
1550 * return an appropriate error. 1730 * return an appropriate error.
1551 */ 1731 */
1552static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1732static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1553{ 1733{
1554 int ret; 1734 int ret;
1555 1735
@@ -1587,6 +1767,124 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1587} 1767}
1588 1768
1589/* 1769/*
1770 * Splice contents of ipipe to opipe.
1771 */
1772static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1773 struct pipe_inode_info *opipe,
1774 size_t len, unsigned int flags)
1775{
1776 struct pipe_buffer *ibuf, *obuf;
1777 int ret = 0, nbuf;
1778 bool input_wakeup = false;
1779
1780
1781retry:
1782 ret = ipipe_prep(ipipe, flags);
1783 if (ret)
1784 return ret;
1785
1786 ret = opipe_prep(opipe, flags);
1787 if (ret)
1788 return ret;
1789
1790 /*
1791 * Potential ABBA deadlock, work around it by ordering lock
1792 * grabbing by pipe info address. Otherwise two different processes
1793 * could deadlock (one doing tee from A -> B, the other from B -> A).
1794 */
1795 pipe_double_lock(ipipe, opipe);
1796
1797 do {
1798 if (!opipe->readers) {
1799 send_sig(SIGPIPE, current, 0);
1800 if (!ret)
1801 ret = -EPIPE;
1802 break;
1803 }
1804
1805 if (!ipipe->nrbufs && !ipipe->writers)
1806 break;
1807
1808 /*
1809 * Cannot make any progress, because either the input
1810 * pipe is empty or the output pipe is full.
1811 */
1812 if (!ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS) {
1813 /* Already processed some buffers, break */
1814 if (ret)
1815 break;
1816
1817 if (flags & SPLICE_F_NONBLOCK) {
1818 ret = -EAGAIN;
1819 break;
1820 }
1821
1822 /*
1823 * We raced with another reader/writer and haven't
1824 * managed to process any buffers. A zero return
1825 * value means EOF, so retry instead.
1826 */
1827 pipe_unlock(ipipe);
1828 pipe_unlock(opipe);
1829 goto retry;
1830 }
1831
1832 ibuf = ipipe->bufs + ipipe->curbuf;
1833 nbuf = (opipe->curbuf + opipe->nrbufs) % PIPE_BUFFERS;
1834 obuf = opipe->bufs + nbuf;
1835
1836 if (len >= ibuf->len) {
1837 /*
1838 * Simply move the whole buffer from ipipe to opipe
1839 */
1840 *obuf = *ibuf;
1841 ibuf->ops = NULL;
1842 opipe->nrbufs++;
1843 ipipe->curbuf = (ipipe->curbuf + 1) % PIPE_BUFFERS;
1844 ipipe->nrbufs--;
1845 input_wakeup = true;
1846 } else {
1847 /*
1848 * Get a reference to this pipe buffer,
1849 * so we can copy the contents over.
1850 */
1851 ibuf->ops->get(ipipe, ibuf);
1852 *obuf = *ibuf;
1853
1854 /*
1855 * Don't inherit the gift flag, we need to
1856 * prevent multiple steals of this page.
1857 */
1858 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1859
1860 obuf->len = len;
1861 opipe->nrbufs++;
1862 ibuf->offset += obuf->len;
1863 ibuf->len -= obuf->len;
1864 }
1865 ret += obuf->len;
1866 len -= obuf->len;
1867 } while (len);
1868
1869 pipe_unlock(ipipe);
1870 pipe_unlock(opipe);
1871
1872 /*
1873 * If we put data in the output pipe, wakeup any potential readers.
1874 */
1875 if (ret > 0) {
1876 smp_mb();
1877 if (waitqueue_active(&opipe->wait))
1878 wake_up_interruptible(&opipe->wait);
1879 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1880 }
1881 if (input_wakeup)
1882 wakeup_pipe_writers(ipipe);
1883
1884 return ret;
1885}
1886
1887/*
1590 * Link contents of ipipe to opipe. 1888 * Link contents of ipipe to opipe.
1591 */ 1889 */
1592static int link_pipe(struct pipe_inode_info *ipipe, 1890static int link_pipe(struct pipe_inode_info *ipipe,
@@ -1690,9 +1988,9 @@ static long do_tee(struct file *in, struct file *out, size_t len,
1690 * Keep going, unless we encounter an error. The ipipe/opipe 1988 * Keep going, unless we encounter an error. The ipipe/opipe
1691 * ordering doesn't really matter. 1989 * ordering doesn't really matter.
1692 */ 1990 */
1693 ret = link_ipipe_prep(ipipe, flags); 1991 ret = ipipe_prep(ipipe, flags);
1694 if (!ret) { 1992 if (!ret) {
1695 ret = link_opipe_prep(opipe, flags); 1993 ret = opipe_prep(opipe, flags);
1696 if (!ret) 1994 if (!ret)
1697 ret = link_pipe(ipipe, opipe, len, flags); 1995 ret = link_pipe(ipipe, opipe, len, flags);
1698 } 1996 }
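With default_file_splice_read()/default_file_splice_write() supplying fallbacks and the new splice_pipe_to_pipe() path, splice() no longer depends on both files implementing the splice hooks, and splicing directly from one pipe to another is now handled. A small userspace sketch of the pipe-to-pipe case this enables (error handling omitted):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int a[2], b[2];
		char buf[8];

		pipe(a);
		pipe(b);
		write(a[1], "hello", 5);
		/* moves the buffer from pipe a to pipe b in the kernel,
		 * without bouncing the data through userspace */
		splice(a[0], NULL, b[1], NULL, 5, 0);
		read(b[0], buf, sizeof(buf));
		return 0;
	}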
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 72348cc855a4..0ba44107d8f1 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1915,7 +1915,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1915 if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { 1915 if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
1916 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 1916 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
1917 } else { 1917 } else {
1918 uopt.blocksize = bdev_hardsect_size(sb->s_bdev); 1918 uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
1919 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 1919 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
1920 if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) { 1920 if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
1921 if (!silent) 1921 if (!silent)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index e28800a9f2b5..1418b916fc27 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1501,7 +1501,7 @@ xfs_setsize_buftarg_early(
1501 struct block_device *bdev) 1501 struct block_device *bdev)
1502{ 1502{
1503 return xfs_setsize_buftarg_flags(btp, 1503 return xfs_setsize_buftarg_flags(btp,
1504 PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0); 1504 PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
1505} 1505}
1506 1506
1507int 1507int
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b214fd672a2..12737be58601 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio {
218#define bio_sectors(bio) ((bio)->bi_size >> 9) 218#define bio_sectors(bio) ((bio)->bi_size >> 9)
219#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) 219#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
220 220
221static inline unsigned int bio_cur_sectors(struct bio *bio) 221static inline unsigned int bio_cur_bytes(struct bio *bio)
222{ 222{
223 if (bio->bi_vcnt) 223 if (bio->bi_vcnt)
224 return bio_iovec(bio)->bv_len >> 9; 224 return bio_iovec(bio)->bv_len;
225 else /* dataless requests such as discard */ 225 else /* dataless requests such as discard */
226 return bio->bi_size >> 9; 226 return bio->bi_size;
227} 227}
228 228
229static inline void *bio_data(struct bio *bio) 229static inline void *bio_data(struct bio *bio)
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
279#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ 279#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
280 (((addr1) | (mask)) == (((addr2) - 1) | (mask))) 280 (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
281#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 281#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
282 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask) 282 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
283#define BIO_SEG_BOUNDARY(q, b1, b2) \ 283#define BIO_SEG_BOUNDARY(q, b1, b2) \
284 BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) 284 BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
285 285
@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
506} 506}
507 507
508/* 508/*
509 * BIO list managment for use by remapping drivers (e.g. DM or MD). 509 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
510 * 510 *
511 * A bio_list anchors a singly-linked list of bios chained through the bi_next 511 * A bio_list anchors a singly-linked list of bios chained through the bi_next
512 * member of the bio. The bio_list also caches the last list member to allow 512 * member of the bio. The bio_list also caches the last list member to allow
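bio_cur_sectors() becomes bio_cur_bytes(): the helper now returns the current vector's length in bytes, and callers that still think in sectors shift by 9 themselves, which is exactly how the new blk_rq_cur_sectors() below is built. A trivial sketch of a converted caller:

	#include <linux/bio.h>

	static inline unsigned int example_cur_sectors(struct bio *bio)
	{
		/* bio_cur_bytes() now returns bytes; shift to get sectors */
		return bio_cur_bytes(bio) >> 9;
	}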
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af7..ebdfde8fe556 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request {
166 enum rq_cmd_type_bits cmd_type; 166 enum rq_cmd_type_bits cmd_type;
167 unsigned long atomic_flags; 167 unsigned long atomic_flags;
168 168
169 /* Maintain bio traversal state for part by part I/O submission. 169 /* the following two fields are internal, NEVER access directly */
170 * hard_* are block layer internals, no driver should touch them! 170 sector_t __sector; /* sector cursor */
171 */ 171 unsigned int __data_len; /* total data len */
172
173 sector_t sector; /* next sector to submit */
174 sector_t hard_sector; /* next sector to complete */
175 unsigned long nr_sectors; /* no. of sectors left to submit */
176 unsigned long hard_nr_sectors; /* no. of sectors left to complete */
177 /* no. of sectors left to submit in the current segment */
178 unsigned int current_nr_sectors;
179
180 /* no. of sectors left to complete in the current segment */
181 unsigned int hard_cur_sectors;
182 172
183 struct bio *bio; 173 struct bio *bio;
184 struct bio *biotail; 174 struct bio *biotail;
@@ -211,8 +201,8 @@ struct request {
211 201
212 unsigned short ioprio; 202 unsigned short ioprio;
213 203
214 void *special; 204 void *special; /* opaque pointer available for LLD use */
215 char *buffer; 205 char *buffer; /* kaddr of the current segment if available */
216 206
217 int tag; 207 int tag;
218 int errors; 208 int errors;
@@ -226,10 +216,9 @@ struct request {
226 unsigned char __cmd[BLK_MAX_CDB]; 216 unsigned char __cmd[BLK_MAX_CDB];
227 unsigned char *cmd; 217 unsigned char *cmd;
228 218
229 unsigned int data_len;
230 unsigned int extra_len; /* length of alignment and padding */ 219 unsigned int extra_len; /* length of alignment and padding */
231 unsigned int sense_len; 220 unsigned int sense_len;
232 void *data; 221 unsigned int resid_len; /* residual count */
233 void *sense; 222 void *sense;
234 223
235 unsigned long deadline; 224 unsigned long deadline;
@@ -318,6 +307,26 @@ struct blk_cmd_filter {
318 struct kobject kobj; 307 struct kobject kobj;
319}; 308};
320 309
310struct queue_limits {
311 unsigned long bounce_pfn;
312 unsigned long seg_boundary_mask;
313
314 unsigned int max_hw_sectors;
315 unsigned int max_sectors;
316 unsigned int max_segment_size;
317 unsigned int physical_block_size;
318 unsigned int alignment_offset;
319 unsigned int io_min;
320 unsigned int io_opt;
321
322 unsigned short logical_block_size;
323 unsigned short max_hw_segments;
324 unsigned short max_phys_segments;
325
326 unsigned char misaligned;
327 unsigned char no_cluster;
328};
329
321struct request_queue 330struct request_queue
322{ 331{
323 /* 332 /*
@@ -369,7 +378,6 @@ struct request_queue
369 /* 378 /*
370 * queue needs bounce pages for pages above this limit 379 * queue needs bounce pages for pages above this limit
371 */ 380 */
372 unsigned long bounce_pfn;
373 gfp_t bounce_gfp; 381 gfp_t bounce_gfp;
374 382
375 /* 383 /*
@@ -398,14 +406,6 @@ struct request_queue
398 unsigned int nr_congestion_off; 406 unsigned int nr_congestion_off;
399 unsigned int nr_batching; 407 unsigned int nr_batching;
400 408
401 unsigned int max_sectors;
402 unsigned int max_hw_sectors;
403 unsigned short max_phys_segments;
404 unsigned short max_hw_segments;
405 unsigned short hardsect_size;
406 unsigned int max_segment_size;
407
408 unsigned long seg_boundary_mask;
409 void *dma_drain_buffer; 409 void *dma_drain_buffer;
410 unsigned int dma_drain_size; 410 unsigned int dma_drain_size;
411 unsigned int dma_pad_mask; 411 unsigned int dma_pad_mask;
@@ -415,12 +415,14 @@ struct request_queue
415 struct list_head tag_busy_list; 415 struct list_head tag_busy_list;
416 416
417 unsigned int nr_sorted; 417 unsigned int nr_sorted;
418 unsigned int in_flight; 418 unsigned int in_flight[2];
419 419
420 unsigned int rq_timeout; 420 unsigned int rq_timeout;
421 struct timer_list timeout; 421 struct timer_list timeout;
422 struct list_head timeout_list; 422 struct list_head timeout_list;
423 423
424 struct queue_limits limits;
425
424 /* 426 /*
425 * sg stuff 427 * sg stuff
426 */ 428 */
@@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
522 __clear_bit(flag, &q->queue_flags); 524 __clear_bit(flag, &q->queue_flags);
523} 525}
524 526
527static inline int queue_in_flight(struct request_queue *q)
528{
529 return q->in_flight[0] + q->in_flight[1];
530}
531
525static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) 532static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
526{ 533{
527 WARN_ON_ONCE(!queue_is_locked(q)); 534 WARN_ON_ONCE(!queue_is_locked(q));
@@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
752extern void blk_put_request(struct request *); 759extern void blk_put_request(struct request *);
753extern void __blk_put_request(struct request_queue *, struct request *); 760extern void __blk_put_request(struct request_queue *, struct request *);
754extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 761extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
762extern struct request *blk_make_request(struct request_queue *, struct bio *,
763 gfp_t);
755extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 764extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
756extern void blk_requeue_request(struct request_queue *, struct request *); 765extern void blk_requeue_request(struct request_queue *, struct request *);
757extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); 766extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
758extern int blk_lld_busy(struct request_queue *q); 767extern int blk_lld_busy(struct request_queue *q);
768extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
769 struct bio_set *bs, gfp_t gfp_mask,
770 int (*bio_ctr)(struct bio *, struct bio *, void *),
771 void *data);
772extern void blk_rq_unprep_clone(struct request *rq);
759extern int blk_insert_cloned_request(struct request_queue *q, 773extern int blk_insert_cloned_request(struct request_queue *q,
760 struct request *rq); 774 struct request *rq);
761extern void blk_plug_device(struct request_queue *); 775extern void blk_plug_device(struct request_queue *);
@@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
768 struct scsi_ioctl_command __user *); 782 struct scsi_ioctl_command __user *);
769 783
770/* 784/*
771 * Temporary export, until SCSI gets fixed up.
772 */
773extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
774 struct bio *bio);
775
776/*
777 * A queue has just exitted congestion. Note this in the global counter of 785 * A queue has just exitted congestion. Note this in the global counter of
778 * congested queues, and wake up anyone who was waiting for requests to be 786 * congested queues, and wake up anyone who was waiting for requests to be
779 * put back. 787 * put back.
@@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q);
798extern void __blk_stop_queue(struct request_queue *q); 806extern void __blk_stop_queue(struct request_queue *q);
799extern void __blk_run_queue(struct request_queue *); 807extern void __blk_run_queue(struct request_queue *);
800extern void blk_run_queue(struct request_queue *); 808extern void blk_run_queue(struct request_queue *);
801extern void blk_start_queueing(struct request_queue *);
802extern int blk_rq_map_user(struct request_queue *, struct request *, 809extern int blk_rq_map_user(struct request_queue *, struct request *,
803 struct rq_map_data *, void __user *, unsigned long, 810 struct rq_map_data *, void __user *, unsigned long,
804 gfp_t); 811 gfp_t);
@@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
831 blk_run_backing_dev(mapping->backing_dev_info, NULL); 838 blk_run_backing_dev(mapping->backing_dev_info, NULL);
832} 839}
833 840
834extern void blkdev_dequeue_request(struct request *req); 841/*
842 * blk_rq_pos() : the current sector
843 * blk_rq_bytes() : bytes left in the entire request
844 * blk_rq_cur_bytes() : bytes left in the current segment
845 * blk_rq_sectors() : sectors left in the entire request
846 * blk_rq_cur_sectors() : sectors left in the current segment
847 */
848static inline sector_t blk_rq_pos(const struct request *rq)
849{
850 return rq->__sector;
851}
852
853static inline unsigned int blk_rq_bytes(const struct request *rq)
854{
855 return rq->__data_len;
856}
857
858static inline int blk_rq_cur_bytes(const struct request *rq)
859{
860 return rq->bio ? bio_cur_bytes(rq->bio) : 0;
861}
862
863static inline unsigned int blk_rq_sectors(const struct request *rq)
864{
865 return blk_rq_bytes(rq) >> 9;
866}
867
868static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
869{
870 return blk_rq_cur_bytes(rq) >> 9;
871}
872
873/*
874 * Request issue related functions.
875 */
876extern struct request *blk_peek_request(struct request_queue *q);
877extern void blk_start_request(struct request *rq);
878extern struct request *blk_fetch_request(struct request_queue *q);
835 879
836/* 880/*
837 * blk_end_request() and friends. 881 * Request completion related functions.
838 * __blk_end_request() and end_request() must be called with 882 *
839 * the request queue spinlock acquired. 883 * blk_update_request() completes given number of bytes and updates
884 * the request without completing it.
885 *
886 * blk_end_request() and friends. __blk_end_request() must be called
887 * with the request queue spinlock acquired.
840 * 888 *
841 * Several drivers define their own end_request and call 889 * Several drivers define their own end_request and call
842 * blk_end_request() for parts of the original function. 890 * blk_end_request() for parts of the original function.
843 * This prevents code duplication in drivers. 891 * This prevents code duplication in drivers.
844 */ 892 */
845extern int blk_end_request(struct request *rq, int error, 893extern bool blk_update_request(struct request *rq, int error,
846 unsigned int nr_bytes); 894 unsigned int nr_bytes);
847extern int __blk_end_request(struct request *rq, int error, 895extern bool blk_end_request(struct request *rq, int error,
848 unsigned int nr_bytes); 896 unsigned int nr_bytes);
849extern int blk_end_bidi_request(struct request *rq, int error, 897extern void blk_end_request_all(struct request *rq, int error);
850 unsigned int nr_bytes, unsigned int bidi_bytes); 898extern bool blk_end_request_cur(struct request *rq, int error);
851extern void end_request(struct request *, int); 899extern bool __blk_end_request(struct request *rq, int error,
852extern int blk_end_request_callback(struct request *rq, int error, 900 unsigned int nr_bytes);
853 unsigned int nr_bytes, 901extern void __blk_end_request_all(struct request *rq, int error);
854 int (drv_callback)(struct request *)); 902extern bool __blk_end_request_cur(struct request *rq, int error);
903
855extern void blk_complete_request(struct request *); 904extern void blk_complete_request(struct request *);
856extern void __blk_complete_request(struct request *); 905extern void __blk_complete_request(struct request *);
857extern void blk_abort_request(struct request *); 906extern void blk_abort_request(struct request *);
858extern void blk_abort_queue(struct request_queue *); 907extern void blk_abort_queue(struct request_queue *);
859extern void blk_update_request(struct request *rq, int error,
860 unsigned int nr_bytes);
861
862/*
863 * blk_end_request() takes bytes instead of sectors as a complete size.
864 * blk_rq_bytes() returns bytes left to complete in the entire request.
865 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
866 */
867extern unsigned int blk_rq_bytes(struct request *rq);
868extern unsigned int blk_rq_cur_bytes(struct request *rq);
869 908
870/* 909/*
871 * Access functions for manipulating queue properties 910 * Access functions for manipulating queue properties
@@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *);
877extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 916extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
878extern void blk_queue_bounce_limit(struct request_queue *, u64); 917extern void blk_queue_bounce_limit(struct request_queue *, u64);
879extern void blk_queue_max_sectors(struct request_queue *, unsigned int); 918extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
919extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
880extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); 920extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
881extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); 921extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
882extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 922extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
883extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); 923extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
924extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
925extern void blk_queue_alignment_offset(struct request_queue *q,
926 unsigned int alignment);
927extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
928extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
929extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
930 sector_t offset);
931extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
932 sector_t offset);
884extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 933extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
885extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 934extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
886extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); 935extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
967 1016
968#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 1017#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
969 1018
970static inline int queue_hardsect_size(struct request_queue *q) 1019static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1020{
1021 return q->limits.bounce_pfn;
1022}
1023
1024static inline unsigned long queue_segment_boundary(struct request_queue *q)
1025{
1026 return q->limits.seg_boundary_mask;
1027}
1028
1029static inline unsigned int queue_max_sectors(struct request_queue *q)
1030{
1031 return q->limits.max_sectors;
1032}
1033
1034static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1035{
1036 return q->limits.max_hw_sectors;
1037}
1038
1039static inline unsigned short queue_max_hw_segments(struct request_queue *q)
1040{
1041 return q->limits.max_hw_segments;
1042}
1043
1044static inline unsigned short queue_max_phys_segments(struct request_queue *q)
1045{
1046 return q->limits.max_phys_segments;
1047}
1048
1049static inline unsigned int queue_max_segment_size(struct request_queue *q)
1050{
1051 return q->limits.max_segment_size;
1052}
1053
1054static inline unsigned short queue_logical_block_size(struct request_queue *q)
971{ 1055{
972 int retval = 512; 1056 int retval = 512;
973 1057
974 if (q && q->hardsect_size) 1058 if (q && q->limits.logical_block_size)
975 retval = q->hardsect_size; 1059 retval = q->limits.logical_block_size;
976 1060
977 return retval; 1061 return retval;
978} 1062}
979 1063
980static inline int bdev_hardsect_size(struct block_device *bdev) 1064static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
1065{
1066 return queue_logical_block_size(bdev_get_queue(bdev));
1067}
1068
1069static inline unsigned int queue_physical_block_size(struct request_queue *q)
1070{
1071 return q->limits.physical_block_size;
1072}
1073
1074static inline unsigned int queue_io_min(struct request_queue *q)
1075{
1076 return q->limits.io_min;
1077}
1078
1079static inline unsigned int queue_io_opt(struct request_queue *q)
1080{
1081 return q->limits.io_opt;
1082}
1083
1084static inline int queue_alignment_offset(struct request_queue *q)
1085{
1086 if (q && q->limits.misaligned)
1087 return -1;
1088
1089 if (q && q->limits.alignment_offset)
1090 return q->limits.alignment_offset;
1091
1092 return 0;
1093}
1094
1095static inline int queue_sector_alignment_offset(struct request_queue *q,
1096 sector_t sector)
981{ 1097{
982 return queue_hardsect_size(bdev_get_queue(bdev)); 1098 return ((sector << 9) - q->limits.alignment_offset)
1099 & (q->limits.io_min - 1);
983} 1100}
984 1101
985static inline int queue_dma_alignment(struct request_queue *q) 1102static inline int queue_dma_alignment(struct request_queue *q)
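Taken together, these blkdev.h changes hide the request's position and length bookkeeping behind blk_rq_pos()/blk_rq_sectors()/blk_rq_cur_sectors() and replace the old elv_next_request() plus blkdev_dequeue_request() pairing with blk_peek_request()/blk_start_request() or the combined blk_fetch_request(). A hedged sketch of what a trivial single-segment driver's request function looks like after conversion; mydev_xfer() is a made-up helper, not part of this patch:

	#include <linux/blkdev.h>

	static void mydev_request(struct request_queue *q)
	{
		struct request *rq = blk_fetch_request(q);  /* peek + start */

		while (rq) {
			int err = mydev_xfer(rq->buffer,	      /* current segment kaddr */
					     blk_rq_pos(rq),	      /* was rq->sector */
					     blk_rq_cur_sectors(rq),  /* was rq->current_nr_sectors */
					     rq_data_dir(rq));
			/* complete the current chunk; a false return means the
			 * whole request is finished, so fetch the next one */
			if (!__blk_end_request_cur(rq, err))
				rq = blk_fetch_request(q);
		}
	}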
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ded2d7c42668..49c2362977fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -149,7 +149,7 @@ struct io_restrictions {
149 unsigned max_hw_sectors; 149 unsigned max_hw_sectors;
150 unsigned max_sectors; 150 unsigned max_sectors;
151 unsigned max_segment_size; 151 unsigned max_segment_size;
152 unsigned short hardsect_size; 152 unsigned short logical_block_size;
153 unsigned short max_hw_segments; 153 unsigned short max_hw_segments;
154 unsigned short max_phys_segments; 154 unsigned short max_phys_segments;
155 unsigned char no_cluster; /* inverted so that 0 is default */ 155 unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769f62b0..1cb3372e65d8 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
103extern void elv_merge_requests(struct request_queue *, struct request *, 103extern void elv_merge_requests(struct request_queue *, struct request *,
104 struct request *); 104 struct request *);
105extern void elv_merged_request(struct request_queue *, struct request *, int); 105extern void elv_merged_request(struct request_queue *, struct request *, int);
106extern void elv_dequeue_request(struct request_queue *, struct request *);
107extern void elv_requeue_request(struct request_queue *, struct request *); 106extern void elv_requeue_request(struct request_queue *, struct request *);
108extern int elv_queue_empty(struct request_queue *); 107extern int elv_queue_empty(struct request_queue *);
109extern struct request *elv_next_request(struct request_queue *q);
110extern struct request *elv_former_request(struct request_queue *, struct request *); 108extern struct request *elv_former_request(struct request_queue *, struct request *);
111extern struct request *elv_latter_request(struct request_queue *, struct request *); 109extern struct request *elv_latter_request(struct request_queue *, struct request *);
112extern int elv_register_queue(struct request_queue *q); 110extern int elv_register_queue(struct request_queue *q);
@@ -171,7 +169,7 @@ enum {
171 ELV_MQUEUE_MUST, 169 ELV_MQUEUE_MUST,
172}; 170};
173 171
174#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) 172#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
175#define rb_entry_rq(node) rb_entry((node), struct request, rb_node) 173#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
176 174
177/* 175/*
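(Aside, not from the patch: the rq_end_sector() rewrite above shows the pattern applied throughout this series; request fields such as rq->sector and rq->nr_sectors are no longer read directly, and callers go through blk_rq_pos(), blk_rq_sectors() and blk_rq_bytes() instead. A hedged sketch of the idiom, with a made-up helper name.)

	/* Illustrative only: the byte range a request covers, expressed via the
	 * accessors this series introduces rather than rq->sector and
	 * rq->nr_sectors. */
	static void rq_byte_range(struct request *rq, u64 *start, u64 *len)
	{
		*start = (u64)blk_rq_pos(rq) << 9;	/* starting sector -> bytes  */
		*len   = blk_rq_bytes(rq);		/* bytes left to transfer    */
	}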
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b534e527e09..83d6b4397245 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2205,6 +2205,8 @@ extern int generic_segment_checks(const struct iovec *iov,
2205/* fs/splice.c */ 2205/* fs/splice.c */
2206extern ssize_t generic_file_splice_read(struct file *, loff_t *, 2206extern ssize_t generic_file_splice_read(struct file *, loff_t *,
2207 struct pipe_inode_info *, size_t, unsigned int); 2207 struct pipe_inode_info *, size_t, unsigned int);
2208extern ssize_t default_file_splice_read(struct file *, loff_t *,
2209 struct pipe_inode_info *, size_t, unsigned int);
2208extern ssize_t generic_file_splice_write(struct pipe_inode_info *, 2210extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
2209 struct file *, loff_t *, size_t, unsigned int); 2211 struct file *, loff_t *, size_t, unsigned int);
2210extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2212extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index a1a28caed23d..149fda264c86 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -90,6 +90,7 @@ struct disk_stats {
90struct hd_struct { 90struct hd_struct {
91 sector_t start_sect; 91 sector_t start_sect;
92 sector_t nr_sects; 92 sector_t nr_sects;
93 sector_t alignment_offset;
93 struct device __dev; 94 struct device __dev;
94 struct kobject *holder_dir; 95 struct kobject *holder_dir;
95 int policy, partno; 96 int policy, partno;
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 08b987bccf89..dd05434fa45f 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -64,7 +64,7 @@ struct cfq_io_context {
64 * and kmalloc'ed. These could be shared between processes. 64 * and kmalloc'ed. These could be shared between processes.
65 */ 65 */
66struct io_context { 66struct io_context {
67 atomic_t refcount; 67 atomic_long_t refcount;
68 atomic_t nr_tasks; 68 atomic_t nr_tasks;
69 69
70 /* all the fields below are protected by this lock */ 70 /* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
91 * if ref count is zero, don't allow sharing (ioc is going away, it's 91 * if ref count is zero, don't allow sharing (ioc is going away, it's
92 * a race). 92 * a race).
93 */ 93 */
94 if (ioc && atomic_inc_not_zero(&ioc->refcount)) { 94 if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
 95 		atomic_inc(&ioc->nr_tasks); 95 		atomic_inc(&ioc->nr_tasks);
96 return ioc; 96 return ioc;
97 } 97 }
98 98
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40725447f5e0..66c194e2d9b9 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -56,8 +56,7 @@ struct loop_device {
56 gfp_t old_gfp_mask; 56 gfp_t old_gfp_mask;
57 57
58 spinlock_t lo_lock; 58 spinlock_t lo_lock;
59 struct bio *lo_bio; 59 struct bio_list lo_bio_list;
60 struct bio *lo_biotail;
61 int lo_state; 60 int lo_state;
62 struct mutex lo_ctl_mutex; 61 struct mutex lo_ctl_mutex;
63 struct task_struct *lo_thread; 62 struct task_struct *lo_thread;
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index 1f76b1ebf627..000000000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,206 +0,0 @@
1/*
2 * include/linux/mg_disk.c
3 *
4 * Support for the mGine m[g]flash IO mode.
5 * Based on legacy hd.c
6 *
7 * (c) 2008 mGine Co.,LTD
8 * (c) 2008 unsik Kim <donari75@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef __MG_DISK_H__
16#define __MG_DISK_H__
17
18#include <linux/blkdev.h>
19#include <linux/ata.h>
20
21/* name for block device */
22#define MG_DISK_NAME "mgd"
23/* name for platform device */
24#define MG_DEV_NAME "mg_disk"
25
26#define MG_DISK_MAJ 0
27#define MG_DISK_MAX_PART 16
28#define MG_SECTOR_SIZE 512
29#define MG_MAX_SECTS 256
30
31/* Register offsets */
32#define MG_BUFF_OFFSET 0x8000
33#define MG_STORAGE_BUFFER_SIZE 0x200
34#define MG_REG_OFFSET 0xC000
35#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
36#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
37#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
38#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
39#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
40#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
41#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
42#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
43#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
44#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
45#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
46
47/* "Drive Select/Head Register" bit values */
48#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
49#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
50#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
51#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)
52
53
54/* "Device Control Register" bit values */
55#define MG_REG_CTRL_INTR_ENABLE 0x0
56#define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
57#define MG_REG_CTRL_RESET (0x1<<2)
58#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
59#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
60#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
61#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
62#define MG_REG_CTRL_DPD_DISABLE 0x0
63#define MG_REG_CTRL_DPD_ENABLE (0x1<<6)
64
65/* Status register bit */
66/* error bit in status register */
67#define MG_REG_STATUS_BIT_ERROR 0x01
68/* corrected error in status register */
69#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
70/* data request bit in status register */
71#define MG_REG_STATUS_BIT_DATA_REQ 0x08
72/* DSC - Drive Seek Complete */
73#define MG_REG_STATUS_BIT_SEEK_DONE 0x10
74/* DWF - Drive Write Fault */
75#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
76#define MG_REG_STATUS_BIT_READY 0x40
77#define MG_REG_STATUS_BIT_BUSY 0x80
78
79/* handy status */
80#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
81#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
82 (MG_REG_STATUS_BIT_BUSY | \
83 MG_REG_STATUS_BIT_WRITE_FAULT | \
84 MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
85
86/* Error register */
87#define MG_REG_ERR_AMNF 0x01
88#define MG_REG_ERR_ABRT 0x04
89#define MG_REG_ERR_IDNF 0x10
90#define MG_REG_ERR_UNC 0x40
91#define MG_REG_ERR_BBK 0x80
92
93/* error code for others */
94#define MG_ERR_NONE 0
95#define MG_ERR_TIMEOUT 0x100
96#define MG_ERR_INIT_STAT 0x101
97#define MG_ERR_TRANSLATION 0x102
98#define MG_ERR_CTRL_RST 0x103
99#define MG_ERR_INV_STAT 0x104
100#define MG_ERR_RSTOUT 0x105
101
102#define MG_MAX_ERRORS 6 /* Max read/write errors */
103
104/* command */
105#define MG_CMD_RD 0x20
106#define MG_CMD_WR 0x30
107#define MG_CMD_SLEEP 0x99
108#define MG_CMD_WAKEUP 0xC3
109#define MG_CMD_ID 0xEC
110#define MG_CMD_WR_CONF 0x3C
111#define MG_CMD_RD_CONF 0x40
112
113/* operation mode */
114#define MG_OP_CASCADE (1 << 0)
115#define MG_OP_CASCADE_SYNC_RD (1 << 1)
116#define MG_OP_CASCADE_SYNC_WR (1 << 2)
117#define MG_OP_INTERLEAVE (1 << 3)
118
119/* synchronous */
120#define MG_BURST_LAT_4 (3 << 4)
121#define MG_BURST_LAT_5 (4 << 4)
122#define MG_BURST_LAT_6 (5 << 4)
123#define MG_BURST_LAT_7 (6 << 4)
124#define MG_BURST_LAT_8 (7 << 4)
125#define MG_BURST_LEN_4 (1 << 1)
126#define MG_BURST_LEN_8 (2 << 1)
127#define MG_BURST_LEN_16 (3 << 1)
128#define MG_BURST_LEN_32 (4 << 1)
129#define MG_BURST_LEN_CONT (0 << 1)
130
131/* timeout value (unit: ms) */
132#define MG_TMAX_CONF_TO_CMD 1
133#define MG_TMAX_WAIT_RD_DRQ 10
134#define MG_TMAX_WAIT_WR_DRQ 500
135#define MG_TMAX_RST_TO_BUSY 10
136#define MG_TMAX_HDRST_TO_RDY 500
137#define MG_TMAX_SWRST_TO_RDY 500
138#define MG_TMAX_RSTOUT 3000
139
140/* device attribution */
141/* use mflash as boot device */
142#define MG_BOOT_DEV (1 << 0)
143/* use mflash as storage device */
144#define MG_STORAGE_DEV (1 << 1)
145/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
146#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
147
148#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
149
150/* names of GPIO resource */
151#define MG_RST_PIN "mg_rst"
152/* except MG_BOOT_DEV, reset-out pin should be assigned */
153#define MG_RSTOUT_PIN "mg_rstout"
154
155/* private driver data */
156struct mg_drv_data {
157 /* disk resource */
158 u32 use_polling;
159
160 /* device attribution */
161 u32 dev_attr;
162
163 /* internally used */
164 struct mg_host *host;
165};
166
167/* main structure for mflash driver */
168struct mg_host {
169 struct device *dev;
170
171 struct request_queue *breq;
172 spinlock_t lock;
173 struct gendisk *gd;
174
175 struct timer_list timer;
176 void (*mg_do_intr) (struct mg_host *);
177
178 u16 id[ATA_ID_WORDS];
179
180 u16 cyls;
181 u16 heads;
182 u16 sectors;
183 u32 n_sectors;
184 u32 nres_sectors;
185
186 void __iomem *dev_base;
187 unsigned int irq;
188 unsigned int rst;
189 unsigned int rstout;
190
191 u32 major;
192 u32 error;
193};
194
195/*
196 * Debugging macro and defines
197 */
198#undef DO_MG_DEBUG
199#ifdef DO_MG_DEBUG
200# define MG_DBG(fmt, args...) \
201 printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
202#else /* CONFIG_MG_DEBUG */
203# define MG_DBG(fmt, args...) do { } while (0)
204#endif /* CONFIG_MG_DEBUG */
205
206#endif
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index c8f038554e80..b43a9e039059 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void
152void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); 152void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
153int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); 153int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
154int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); 154int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
155void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
155 156
156#endif 157#endif
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 5f3faa9d15ae..18e7c7c0cae6 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -11,8 +11,7 @@
11#include <linux/pipe_fs_i.h> 11#include <linux/pipe_fs_i.h>
12 12
13/* 13/*
14 * splice is tied to pipes as a transport (at least for now), so we'll just 14 * Flags passed in from splice/tee/vmsplice
15 * add the splice flags here.
16 */ 15 */
17#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ 16#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
18#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ 17#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 94c56d29869d..be7d255fc7cf 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,10 @@
15#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ 15#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
16#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ 16#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
17#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ 17#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
18#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
19#define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */
20
21#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */
18 22
19struct virtio_blk_config 23struct virtio_blk_config
20{ 24{
@@ -32,6 +36,7 @@ struct virtio_blk_config
32 } geometry; 36 } geometry;
33 /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ 37 /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
34 __u32 blk_size; 38 __u32 blk_size;
39 __u8 identify[VIRTIO_BLK_ID_BYTES];
35} __attribute__((packed)); 40} __attribute__((packed));
36 41
37/* These two define direction. */ 42/* These two define direction. */
@@ -55,6 +60,13 @@ struct virtio_blk_outhdr
55 __u64 sector; 60 __u64 sector;
56}; 61};
57 62
63struct virtio_scsi_inhdr {
64 __u32 errors;
65 __u32 data_len;
66 __u32 sense_len;
67 __u32 residual;
68};
69
58/* And this is the final byte of the write scatter-gather list. */ 70/* And this is the final byte of the write scatter-gather list. */
59#define VIRTIO_BLK_S_OK 0 71#define VIRTIO_BLK_S_OK 0
60#define VIRTIO_BLK_S_IOERR 1 72#define VIRTIO_BLK_S_IOERR 1
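(Aside, not from the patch: VIRTIO_BLK_F_SCSI and VIRTIO_BLK_F_IDENTIFY are ordinary virtio feature bits, so a guest driver would gate the corresponding code paths on them. A minimal sketch with a hypothetical helper name.)

	#include <linux/virtio_config.h>
	#include <linux/virtio_blk.h>

	/* Hypothetical helper: only offer SG_IO-style passthrough when the host
	 * advertises the feature bit defined above. */
	static bool vblk_supports_scsi_passthru(struct virtio_device *vdev)
	{
		return virtio_has_feature(vdev, VIRTIO_BLK_F_SCSI);
	}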
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 43b50d36925c..3878d1dc7f59 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -270,7 +270,7 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
270 270
271static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) 271static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
272{ 272{
273 return scmd->request->sector; 273 return blk_rq_pos(scmd->request);
274} 274}
275 275
276static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd) 276static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 53effd496a50..d6b05f42dd44 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -25,9 +25,8 @@ TRACE_EVENT(block_rq_abort,
25 25
26 TP_fast_assign( 26 TP_fast_assign(
27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
28 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 28 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
29 __entry->nr_sector = blk_pc_request(rq) ? 29 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
30 0 : rq->hard_nr_sectors;
31 __entry->errors = rq->errors; 30 __entry->errors = rq->errors;
32 31
33 blk_fill_rwbs_rq(__entry->rwbs, rq); 32 blk_fill_rwbs_rq(__entry->rwbs, rq);
@@ -59,10 +58,9 @@ TRACE_EVENT(block_rq_insert,
59 58
60 TP_fast_assign( 59 TP_fast_assign(
61 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 60 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
62 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 61 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
63 __entry->nr_sector = blk_pc_request(rq) ? 62 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
64 0 : rq->hard_nr_sectors; 63 __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
65 __entry->bytes = blk_pc_request(rq) ? rq->data_len : 0;
66 64
67 blk_fill_rwbs_rq(__entry->rwbs, rq); 65 blk_fill_rwbs_rq(__entry->rwbs, rq);
68 blk_dump_cmd(__get_str(cmd), rq); 66 blk_dump_cmd(__get_str(cmd), rq);
@@ -94,10 +92,9 @@ TRACE_EVENT(block_rq_issue,
94 92
95 TP_fast_assign( 93 TP_fast_assign(
96 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 94 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
97 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 95 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
98 __entry->nr_sector = blk_pc_request(rq) ? 96 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
99 0 : rq->hard_nr_sectors; 97 __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
100 __entry->bytes = blk_pc_request(rq) ? rq->data_len : 0;
101 98
102 blk_fill_rwbs_rq(__entry->rwbs, rq); 99 blk_fill_rwbs_rq(__entry->rwbs, rq);
103 blk_dump_cmd(__get_str(cmd), rq); 100 blk_dump_cmd(__get_str(cmd), rq);
@@ -128,9 +125,8 @@ TRACE_EVENT(block_rq_requeue,
128 125
129 TP_fast_assign( 126 TP_fast_assign(
130 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 127 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
131 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 128 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
132 __entry->nr_sector = blk_pc_request(rq) ? 129 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
133 0 : rq->hard_nr_sectors;
134 __entry->errors = rq->errors; 130 __entry->errors = rq->errors;
135 131
136 blk_fill_rwbs_rq(__entry->rwbs, rq); 132 blk_fill_rwbs_rq(__entry->rwbs, rq);
@@ -161,9 +157,8 @@ TRACE_EVENT(block_rq_complete,
161 157
162 TP_fast_assign( 158 TP_fast_assign(
163 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 159 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
164 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 160 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
165 __entry->nr_sector = blk_pc_request(rq) ? 161 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
166 0 : rq->hard_nr_sectors;
167 __entry->errors = rq->errors; 162 __entry->errors = rq->errors;
168 163
169 blk_fill_rwbs_rq(__entry->rwbs, rq); 164 blk_fill_rwbs_rq(__entry->rwbs, rq);
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7bd6a9893c24..39af8af6fc30 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -669,12 +669,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
669 669
670 if (blk_pc_request(rq)) { 670 if (blk_pc_request(rq)) {
671 what |= BLK_TC_ACT(BLK_TC_PC); 671 what |= BLK_TC_ACT(BLK_TC_PC);
672 __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, 672 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
673 rq->cmd_len, rq->cmd); 673 what, rq->errors, rq->cmd_len, rq->cmd);
674 } else { 674 } else {
675 what |= BLK_TC_ACT(BLK_TC_FS); 675 what |= BLK_TC_ACT(BLK_TC_FS);
676 __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, 676 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
677 rw, what, rq->errors, 0, NULL); 677 what, rq->errors, 0, NULL);
678 } 678 }
679} 679}
680 680
@@ -881,11 +881,11 @@ void blk_add_driver_data(struct request_queue *q,
881 return; 881 return;
882 882
883 if (blk_pc_request(rq)) 883 if (blk_pc_request(rq))
884 __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, 884 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
885 rq->errors, len, data); 885 BLK_TA_DRV_DATA, rq->errors, len, data);
886 else 886 else
887 __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, 887 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
888 0, BLK_TA_DRV_DATA, rq->errors, len, data); 888 BLK_TA_DRV_DATA, rq->errors, len, data);
889} 889}
890EXPORT_SYMBOL_GPL(blk_add_driver_data); 890EXPORT_SYMBOL_GPL(blk_add_driver_data);
891 891
@@ -1724,10 +1724,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
1724 if (blk_discard_rq(rq)) 1724 if (blk_discard_rq(rq))
1725 rw |= (1 << BIO_RW_DISCARD); 1725 rw |= (1 << BIO_RW_DISCARD);
1726 1726
1727 if (blk_pc_request(rq)) 1727 bytes = blk_rq_bytes(rq);
1728 bytes = rq->data_len;
1729 else
1730 bytes = rq->hard_nr_sectors << 9;
1731 1728
1732 blk_fill_rwbs(rwbs, rw, bytes); 1729 blk_fill_rwbs(rwbs, rw, bytes);
1733} 1730}
diff --git a/mm/bounce.c b/mm/bounce.c
index 65f5e17e411a..4ebe3ea83795 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -191,7 +191,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
191 /* 191 /*
192 * is destination page below bounce pfn? 192 * is destination page below bounce pfn?
193 */ 193 */
194 if (page_to_pfn(page) <= q->bounce_pfn) 194 if (page_to_pfn(page) <= queue_bounce_pfn(q))
195 continue; 195 continue;
196 196
197 /* 197 /*
@@ -283,7 +283,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
283 * don't waste time iterating over bio segments 283 * don't waste time iterating over bio segments
284 */ 284 */
285 if (!(q->bounce_gfp & GFP_DMA)) { 285 if (!(q->bounce_gfp & GFP_DMA)) {
286 if (q->bounce_pfn >= blk_max_pfn) 286 if (queue_bounce_pfn(q) >= blk_max_pfn)
287 return; 287 return;
288 pool = page_pool; 288 pool = page_pool;
289 } else { 289 } else {