-rw-r--r--  Documentation/block/barrier.txt | 6
-rw-r--r--  Documentation/block/biodoc.txt | 10
-rw-r--r--  Documentation/block/request.txt | 2
-rw-r--r--  Documentation/iostats.txt | 2
-rw-r--r--  arch/arm/plat-omap/mailbox.c | 8
-rw-r--r--  arch/um/drivers/ubd_kern.c | 4
-rw-r--r--  block/as-iosched.c | 26
-rw-r--r--  block/blktrace.c | 10
-rw-r--r--  block/bsg.c | 12
-rw-r--r--  block/cfq-iosched.c | 39
-rw-r--r--  block/deadline-iosched.c | 18
-rw-r--r--  block/elevator.c | 75
-rw-r--r--  block/ll_rw_blk.c | 215
-rw-r--r--  block/noop-iosched.c | 14
-rw-r--r--  block/scsi_ioctl.c | 24
-rw-r--r--  drivers/acorn/block/fd1772.c | 4
-rw-r--r--  drivers/acorn/block/mfmhd.c | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/block/amiflop.c | 2
-rw-r--r--  drivers/block/aoe/aoe.h | 2
-rw-r--r--  drivers/block/aoe/aoeblk.c | 2
-rw-r--r--  drivers/block/ataflop.c | 2
-rw-r--r--  drivers/block/cciss.c | 10
-rw-r--r--  drivers/block/cpqarray.c | 6
-rw-r--r--  drivers/block/floppy.c | 4
-rw-r--r--  drivers/block/lguest_blk.c | 2
-rw-r--r--  drivers/block/loop.c | 4
-rw-r--r--  drivers/block/nbd.c | 4
-rw-r--r--  drivers/block/paride/pcd.c | 4
-rw-r--r--  drivers/block/paride/pd.c | 2
-rw-r--r--  drivers/block/paride/pf.c | 4
-rw-r--r--  drivers/block/pktcdvd.c | 12
-rw-r--r--  drivers/block/ps2esdi.c | 4
-rw-r--r--  drivers/block/ps3disk.c | 8
-rw-r--r--  drivers/block/rd.c | 2
-rw-r--r--  drivers/block/sunvdc.c | 2
-rw-r--r--  drivers/block/swim3.c | 4
-rw-r--r--  drivers/block/sx8.c | 20
-rw-r--r--  drivers/block/ub.c | 6
-rw-r--r--  drivers/block/umem.c | 6
-rw-r--r--  drivers/block/viodasd.c | 2
-rw-r--r--  drivers/block/xd.c | 2
-rw-r--r--  drivers/block/xd.h | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 4
-rw-r--r--  drivers/block/xsysace.c | 4
-rw-r--r--  drivers/block/z2ram.c | 2
-rw-r--r--  drivers/cdrom/cdrom.c | 2
-rw-r--r--  drivers/cdrom/viocd.c | 2
-rw-r--r--  drivers/ide/ide-cd.c | 4
-rw-r--r--  drivers/ide/ide-disk.c | 4
-rw-r--r--  drivers/ide/ide-io.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 2
-rw-r--r--  drivers/ide/legacy/hd.c | 2
-rw-r--r--  drivers/md/dm-table.c | 8
-rw-r--r--  drivers/md/dm.c | 10
-rw-r--r--  drivers/md/faulty.c | 2
-rw-r--r--  drivers/md/linear.c | 14
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/multipath.c | 12
-rw-r--r--  drivers/md/raid0.c | 14
-rw-r--r--  drivers/md/raid1.c | 12
-rw-r--r--  drivers/md/raid10.c | 14
-rw-r--r--  drivers/md/raid5.c | 18
-rw-r--r--  drivers/message/i2o/i2o_block.c | 4
-rw-r--r--  drivers/mmc/card/queue.c | 8
-rw-r--r--  drivers/s390/block/dasd.c | 4
-rw-r--r--  drivers/s390/block/dasd_int.h | 2
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 2
-rw-r--r--  drivers/s390/char/tape.h | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 4
-rw-r--r--  drivers/sbus/char/jsflash.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 12
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  fs/bio.c | 30
-rw-r--r--  include/asm-arm/arch-omap/mailbox.h | 2
-rw-r--r--  include/linux/blkdev.h | 140
-rw-r--r--  include/linux/blktrace_api.h | 2
-rw-r--r--  include/linux/elevator.h | 76
-rw-r--r--  include/linux/ide.h | 4
-rw-r--r--  include/linux/loop.h | 2
-rw-r--r--  include/linux/raid/md_k.h | 4
-rw-r--r--  include/scsi/sd.h | 2
-rw-r--r--  mm/bounce.c | 4
85 files changed, 529 insertions(+), 510 deletions(-)
diff --git a/Documentation/block/barrier.txt b/Documentation/block/barrier.txt
index 7d279f2f5bb..2c2f24f634e 100644
--- a/Documentation/block/barrier.txt
+++ b/Documentation/block/barrier.txt
@@ -79,9 +79,9 @@ and how to prepare flush requests. Note that the term 'ordered' is
 used to indicate the whole sequence of performing barrier requests
 including draining and flushing.
 
-typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);
+typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);
 
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 		      prepare_flush_fn *prepare_flush_fn);
 
 @q : the queue in question
@@ -92,7 +92,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
 For example, SCSI disk driver's prepare_flush_fn looks like the
 following.
 
-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
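(For reference, a minimal sketch of how a driver of this era would wire up the renamed flush API; not part of the patch. The mydrv_* names are hypothetical, the call signatures follow barrier.txt above, and QUEUE_ORDERED_DRAIN_FLUSH is one of the ordered modes of this kernel generation.)

/*
 * Sketch only: register a cache-flush hook with struct request_queue.
 * mydrv_* is hypothetical; SYNCHRONIZE_CACHE is the standard SCSI
 * flush opcode from <scsi/scsi.h>.
 */
static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;
	rq->cmd[0] = SYNCHRONIZE_CACHE;	/* flush the drive's write cache */
	rq->cmd_len = 10;
}

static void mydrv_setup_queue(struct request_queue *q)
{
	/* drain queued requests around a barrier, then issue the flush */
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
}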
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 3adaace328a..8af392fc6ef 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -740,12 +740,12 @@ Block now offers some simple generic functionality to help support command
 queueing (typically known as tagged command queueing), ie manage more than
 one outstanding command on a queue at any given time.
 
-	blk_queue_init_tags(request_queue_t *q, int depth)
+	blk_queue_init_tags(struct request_queue *q, int depth)
 
 	Initialize internal command tagging structures for a maximum
 	depth of 'depth'.
 
-	blk_queue_free_tags((request_queue_t *q)
+	blk_queue_free_tags((struct request_queue *q)
 
 	Teardown tag info associated with the queue. This will be done
 	automatically by block if blk_queue_cleanup() is called on a queue
@@ -754,7 +754,7 @@ one outstanding command on a queue at any given time.
 The above are initialization and exit management, the main helpers during
 normal operations are:
 
-	blk_queue_start_tag(request_queue_t *q, struct request *rq)
+	blk_queue_start_tag(struct request_queue *q, struct request *rq)
 
 	Start tagged operation for this request. A free tag number between
 	0 and 'depth' is assigned to the request (rq->tag holds this number),
@@ -762,7 +762,7 @@ normal operations are:
 	for this queue is already achieved (or if the tag wasn't started for
 	some other reason), 1 is returned. Otherwise 0 is returned.
 
-	blk_queue_end_tag(request_queue_t *q, struct request *rq)
+	blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	End tagged operation on this request. 'rq' is removed from the internal
 	book keeping structures.
@@ -781,7 +781,7 @@ queue. For instance, on IDE any tagged request error needs to clear both
 the hardware and software block queue and enable the driver to sanely restart
 all the outstanding requests. There's a third helper to do that:
 
-	blk_queue_invalidate_tags(request_queue_t *q)
+	blk_queue_invalidate_tags(struct request_queue *q)
 
 	Clear the internal block tag queue and re-add all the pending requests
 	to the request queue. The driver will receive them again on the
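(Likewise, a hedged sketch of the tag-queueing flow these helpers describe; not part of the patch. mydrv_* is hypothetical, the helper signatures and return convention are taken as documented above, and the completion path is simplified.)

/*
 * Sketch only: per biodoc.txt, blk_queue_start_tag() returns nonzero
 * when the tag depth is exhausted, so the request is retried on a
 * later queue run; rq->tag identifies the command to the hardware.
 */
static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;		/* out of tags, try again later */
		/* ... issue rq to hardware, keyed by rq->tag ... */
	}
}

/* on command completion (queue lock held), release the tag for reuse */
static void mydrv_complete(struct request_queue *q, struct request *rq)
{
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, 1);
}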
diff --git a/Documentation/block/request.txt b/Documentation/block/request.txt
index 75924e2a697..fff58acb40a 100644
--- a/Documentation/block/request.txt
+++ b/Documentation/block/request.txt
@@ -83,6 +83,6 @@ struct bio *bio DBI First bio in request
 
 struct bio *biotail	DBI	Last bio in request
 
-request_queue_t *q	DB	Request queue this request belongs to
+struct request_queue *q	DB	Request queue this request belongs to
 
 struct request_list *rl	B	Request list this request came from
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 09a1bafe252..b963c3b4afa 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -79,7 +79,7 @@ Field 8 -- # of milliseconds spent writing
 	measured from __make_request() to end_that_request_last()).
 Field 9 -- # of I/Os currently in progress
 	The only field that should go to zero. Incremented as requests are
-	given to appropriate request_queue_t and decremented as they finish.
+	given to appropriate struct request_queue and decremented as they finish.
 Field 10 -- # of milliseconds spent doing I/Os
 	This field is increases so long as field 9 is nonzero.
 Field 11 -- weighted # of milliseconds spent doing I/Os
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index de7e6ef48bd..0360b1f14d1 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -161,11 +161,11 @@ static void mbox_rx_work(struct work_struct *work)
 /*
  * Mailbox interrupt handler
  */
-static void mbox_txq_fn(request_queue_t * q)
+static void mbox_txq_fn(struct request_queue * q)
 {
 }
 
-static void mbox_rxq_fn(request_queue_t * q)
+static void mbox_rxq_fn(struct request_queue * q)
 {
 }
 
@@ -180,7 +180,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
 {
 	struct request *rq;
 	mbox_msg_t msg;
-	request_queue_t *q = mbox->rxq->queue;
+	struct request_queue *q = mbox->rxq->queue;
 
 	disable_mbox_irq(mbox, IRQ_RX);
 
@@ -297,7 +297,7 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
 					request_fn_proc * proc,
 					void (*work) (struct work_struct *))
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	struct omap_mbox_queue *mq;
 
 	mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index fc27f6c72b4..aff661fe2ee 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -469,7 +469,7 @@ __uml_help(fakehd,
469" Change the ubd device name to \"hd\".\n\n" 469" Change the ubd device name to \"hd\".\n\n"
470); 470);
471 471
472static void do_ubd_request(request_queue_t * q); 472static void do_ubd_request(struct request_queue * q);
473 473
474/* Only changed by ubd_init, which is an initcall. */ 474/* Only changed by ubd_init, which is an initcall. */
475int thread_fd = -1; 475int thread_fd = -1;
@@ -1081,7 +1081,7 @@ static void prepare_request(struct request *req, struct io_thread_req *io_req,
1081} 1081}
1082 1082
1083/* Called with dev->lock held */ 1083/* Called with dev->lock held */
1084static void do_ubd_request(request_queue_t *q) 1084static void do_ubd_request(struct request_queue *q)
1085{ 1085{
1086 struct io_thread_req *io_req; 1086 struct io_thread_req *io_req;
1087 struct request *req; 1087 struct request *req;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 3e316dd7252..dc715a562e1 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -796,7 +796,7 @@ static void update_write_batch(struct as_data *ad)
  * as_completed_request is to be called when a request has completed and
  * returned something to the requesting process, be it an error or data.
  */
-static void as_completed_request(request_queue_t *q, struct request *rq)
+static void as_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 
@@ -853,7 +853,8 @@ out:
  * reference unless it replaces the request at somepart of the elevator
  * (ie. the dispatch queue)
  */
-static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+static void as_remove_queued_request(struct request_queue *q,
+				     struct request *rq)
 {
 	const int data_dir = rq_is_sync(rq);
 	struct as_data *ad = q->elevator->elevator_data;
@@ -978,7 +979,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(request_queue_t *q, int force)
+static int as_dispatch_request(struct request_queue *q, int force)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
@@ -1139,7 +1140,7 @@ fifo_expired:
 /*
  * add rq to rbtree and fifo
  */
-static void as_add_request(request_queue_t *q, struct request *rq)
+static void as_add_request(struct request_queue *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	int data_dir;
@@ -1167,7 +1168,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	RQ_SET_STATE(rq, AS_RQ_QUEUED);
 }
 
-static void as_activate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(struct request_queue *q, struct request *rq)
 {
 	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
 	RQ_SET_STATE(rq, AS_RQ_REMOVED);
@@ -1175,7 +1176,7 @@ static void as_activate_request(request_queue_t *q, struct request *rq)
 		atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
 }
 
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_deactivate_request(struct request_queue *q, struct request *rq)
 {
 	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
 	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
@@ -1189,7 +1190,7 @@ static void as_deactivate_request(request_queue_t *q, struct request *rq)
  * is not empty - it is used in the block layer to check for plugging and
  * merging opportunities
  */
-static int as_queue_empty(request_queue_t *q)
+static int as_queue_empty(struct request_queue *q)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 
@@ -1198,7 +1199,7 @@ static int as_queue_empty(request_queue_t *q)
 }
 
 static int
-as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+as_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
@@ -1216,7 +1217,8 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
-static void as_merged_request(request_queue_t *q, struct request *req, int type)
+static void as_merged_request(struct request_queue *q, struct request *req,
+			      int type)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 
@@ -1234,7 +1236,7 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type)
 	}
 }
 
-static void as_merged_requests(request_queue_t *q, struct request *req,
+static void as_merged_requests(struct request_queue *q, struct request *req,
 			       struct request *next)
 {
 	/*
@@ -1285,7 +1287,7 @@ static void as_work_handler(struct work_struct *work)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(struct request_queue *q, int rw)
 {
 	int ret = ELV_MQUEUE_MAY;
 	struct as_data *ad = q->elevator->elevator_data;
@@ -1318,7 +1320,7 @@ static void as_exit_queue(elevator_t *e)
 /*
  * initialize elevator private data (as_data).
  */
-static void *as_init_queue(request_queue_t *q)
+static void *as_init_queue(struct request_queue *q)
 {
 	struct as_data *ad;
 
diff --git a/block/blktrace.c b/block/blktrace.c
index 3f0e7c37c05..20c3e22587b 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -231,7 +231,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	kfree(bt);
 }
 
-static int blk_trace_remove(request_queue_t *q)
+static int blk_trace_remove(struct request_queue *q)
 {
 	struct blk_trace *bt;
 
@@ -312,7 +312,7 @@ static struct rchan_callbacks blk_relay_callbacks = {
 /*
  * Setup everything required to start tracing
  */
-static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 			   char __user *arg)
 {
 	struct blk_user_trace_setup buts;
@@ -401,7 +401,7 @@ err:
 	return ret;
 }
 
-static int blk_trace_startstop(request_queue_t *q, int start)
+static int blk_trace_startstop(struct request_queue *q, int start)
 {
 	struct blk_trace *bt;
 	int ret;
@@ -444,7 +444,7 @@ static int blk_trace_startstop(request_queue_t *q, int start)
  **/
 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	int ret, start = 0;
 
 	q = bdev_get_queue(bdev);
@@ -479,7 +479,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  * @q: the request queue associated with the device
  *
  **/
-void blk_trace_shutdown(request_queue_t *q)
+void blk_trace_shutdown(struct request_queue *q)
 {
 	if (q->blk_trace) {
 		blk_trace_startstop(q, 0);
diff --git a/block/bsg.c b/block/bsg.c
index b571869928a..3b2f05258a9 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,7 @@
 #define BSG_VERSION	"0.4"
 
 struct bsg_device {
-	request_queue_t *queue;
+	struct request_queue *queue;
 	spinlock_t lock;
 	struct list_head busy_list;
 	struct list_head done_list;
@@ -180,7 +180,7 @@ unlock:
 	return ret;
 }
 
-static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 				struct sg_io_v4 *hdr, int has_write_perm)
 {
 	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -214,7 +214,7 @@ static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
  * Check if sg_io_v4 from user is allowed and valid
  */
 static int
-bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
 {
 	int ret = 0;
 
@@ -250,7 +250,7 @@ bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
 static struct request *
 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
-	request_queue_t *q = bd->queue;
+	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
 	int ret, rw;
 	unsigned int dxfer_len;
@@ -345,7 +345,7 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
  * do final setup of a 'bc' and submit the matching 'rq' to the block
  * layer for io
  */
-static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
 			    struct bsg_command *bc, struct request *rq)
 {
 	rq->sense = bc->sense;
@@ -611,7 +611,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
 	bc = NULL;
 	ret = 0;
 	while (nr_commands) {
-		request_queue_t *q = bd->queue;
+		struct request_queue *q = bd->queue;
 
 		bc = bsg_alloc_command(bd);
 		if (IS_ERR(bc)) {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d148ccbc36d..54dc0543900 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,7 +71,7 @@ struct cfq_rb_root {
  * Per block device queue structure
  */
 struct cfq_data {
-	request_queue_t *queue;
+	struct request_queue *queue;
 
 	/*
 	 * rr list of queues with requests and the count of them
@@ -197,7 +197,7 @@ CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
 
-static void cfq_dispatch_insert(request_queue_t *, struct request *);
+static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
 				       struct task_struct *, gfp_t);
 static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
@@ -237,7 +237,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 		kblockd_schedule_work(&cfqd->unplug_work);
 }
 
-static int cfq_queue_empty(request_queue_t *q)
+static int cfq_queue_empty(struct request_queue *q)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
@@ -623,7 +623,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 	return NULL;
 }
 
-static void cfq_activate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
@@ -641,7 +641,7 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
 	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
 }
 
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
@@ -665,7 +665,8 @@ static void cfq_remove_request(struct request *rq)
 	}
 }
 
-static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+static int cfq_merge(struct request_queue *q, struct request **req,
+		     struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct request *__rq;
@@ -679,7 +680,7 @@ static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
-static void cfq_merged_request(request_queue_t *q, struct request *req,
+static void cfq_merged_request(struct request_queue *q, struct request *req,
 			       int type)
 {
 	if (type == ELEVATOR_FRONT_MERGE) {
@@ -690,7 +691,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req,
 }
 
 static void
-cfq_merged_requests(request_queue_t *q, struct request *rq,
+cfq_merged_requests(struct request_queue *q, struct request *rq,
 		    struct request *next)
 {
 	/*
@@ -703,7 +704,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
-static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 			   struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -913,7 +914,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 /*
  * Move request from internal lists to the request queue dispatch list.
  */
-static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
+static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1093,7 +1094,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
-static int cfq_dispatch_requests(request_queue_t *q, int force)
+static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq;
@@ -1214,7 +1215,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 	struct cfq_data *cfqd = cic->key;
 
 	if (cfqd) {
-		request_queue_t *q = cfqd->queue;
+		struct request_queue *q = cfqd->queue;
 
 		spin_lock_irq(q->queue_lock);
 		__cfq_exit_single_io_context(cfqd, cic);
@@ -1775,7 +1776,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static void cfq_insert_request(request_queue_t *q, struct request *rq)
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1789,7 +1790,7 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
-static void cfq_completed_request(request_queue_t *q, struct request *rq)
+static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 	struct cfq_data *cfqd = cfqq->cfqd;
@@ -1868,7 +1869,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
 	return ELV_MQUEUE_MAY;
 }
 
-static int cfq_may_queue(request_queue_t *q, int rw)
+static int cfq_may_queue(struct request_queue *q, int rw)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
@@ -1922,7 +1923,7 @@ static void cfq_put_request(struct request *rq)
  * Allocate cfq data structures associated with this request.
  */
 static int
-cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
@@ -1974,7 +1975,7 @@ static void cfq_kick_queue(struct work_struct *work)
 {
 	struct cfq_data *cfqd =
 		container_of(work, struct cfq_data, unplug_work);
-	request_queue_t *q = cfqd->queue;
+	struct request_queue *q = cfqd->queue;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -2072,7 +2073,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
 static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
-	request_queue_t *q = cfqd->queue;
+	struct request_queue *q = cfqd->queue;
 
 	cfq_shutdown_timer_wq(cfqd);
 
@@ -2098,7 +2099,7 @@ static void cfq_exit_queue(elevator_t *e)
 	kfree(cfqd);
 }
 
-static void *cfq_init_queue(request_queue_t *q)
+static void *cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
 
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 87ca02ac84c..1a511ffaf8a 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 /*
  * remove rq from rbtree and fifo.
  */
-static void deadline_remove_request(request_queue_t *q, struct request *rq)
+static void deadline_remove_request(struct request_queue *q, struct request *rq)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 
@@ -115,7 +115,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
 }
 
 static int
-deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct request *__rq;
@@ -144,8 +144,8 @@ out:
 	return ret;
 }
 
-static void deadline_merged_request(request_queue_t *q, struct request *req,
-				    int type)
+static void deadline_merged_request(struct request_queue *q,
+				    struct request *req, int type)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 
@@ -159,7 +159,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req,
 }
 
 static void
-deadline_merged_requests(request_queue_t *q, struct request *req,
+deadline_merged_requests(struct request_queue *q, struct request *req,
 			 struct request *next)
 {
 	/*
@@ -185,7 +185,7 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
 static inline void
 deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
 {
-	request_queue_t *q = rq->q;
+	struct request_queue *q = rq->q;
 
 	deadline_remove_request(q, rq);
 	elv_dispatch_add_tail(q, rq);
@@ -236,7 +236,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
  * deadline_dispatch_requests selects the best request according to
  * read/write expire, fifo_batch, etc
  */
-static int deadline_dispatch_requests(request_queue_t *q, int force)
+static int deadline_dispatch_requests(struct request_queue *q, int force)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 	const int reads = !list_empty(&dd->fifo_list[READ]);
@@ -335,7 +335,7 @@ dispatch_request:
 	return 1;
 }
 
-static int deadline_queue_empty(request_queue_t *q)
+static int deadline_queue_empty(struct request_queue *q)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 
@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
 /*
  * initialize elevator private data (deadline_data).
  */
-static void *deadline_init_queue(request_queue_t *q)
+static void *deadline_init_queue(struct request_queue *q)
 {
 	struct deadline_data *dd;
 
diff --git a/block/elevator.c b/block/elevator.c
index d265963d1ed..c6d153de9fd 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -56,7 +56,7 @@ static const int elv_hash_shift = 6;
  */
 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 {
-	request_queue_t *q = rq->q;
+	struct request_queue *q = rq->q;
 	elevator_t *e = q->elevator;
 
 	if (e->ops->elevator_allow_merge_fn)
@@ -141,12 +141,13 @@ static struct elevator_type *elevator_get(const char *name)
 	return e;
 }
 
-static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(struct request_queue *q,
+				 struct elevator_queue *eq)
 {
 	return eq->ops->elevator_init_fn(q);
 }
 
-static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
 			    void *data)
 {
 	q->elevator = eq;
@@ -172,7 +173,8 @@ __setup("elevator=", elevator_setup);
 
 static struct kobj_type elv_ktype;
 
-static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
+static elevator_t *elevator_alloc(struct request_queue *q,
+				  struct elevator_type *e)
 {
 	elevator_t *eq;
 	int i;
@@ -212,7 +214,7 @@ static void elevator_release(struct kobject *kobj)
 	kfree(e);
 }
 
-int elevator_init(request_queue_t *q, char *name)
+int elevator_init(struct request_queue *q, char *name)
 {
 	struct elevator_type *e = NULL;
 	struct elevator_queue *eq;
@@ -264,7 +266,7 @@ void elevator_exit(elevator_t *e)
 
 EXPORT_SYMBOL(elevator_exit);
 
-static void elv_activate_rq(request_queue_t *q, struct request *rq)
+static void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -272,7 +274,7 @@ static void elv_activate_rq(request_queue_t *q, struct request *rq)
 		e->ops->elevator_activate_req_fn(q, rq);
 }
 
-static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
+static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -285,13 +287,13 @@ static inline void __elv_rqhash_del(struct request *rq)
 	hlist_del_init(&rq->hash);
 }
 
-static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+static void elv_rqhash_del(struct request_queue *q, struct request *rq)
 {
 	if (ELV_ON_HASH(rq))
 		__elv_rqhash_del(rq);
 }
 
-static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -299,13 +301,13 @@ static void elv_rqhash_add(request_queue_t *q, struct request *rq)
 	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
 }
 
-static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 {
 	__elv_rqhash_del(rq);
 	elv_rqhash_add(q, rq);
 }
 
-static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
 	elevator_t *e = q->elevator;
 	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
@@ -391,7 +393,7 @@ EXPORT_SYMBOL(elv_rb_find);
  * entry. rq is sort insted into the dispatch queue. To be used by
  * specific elevators.
  */
-void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 {
 	sector_t boundary;
 	struct list_head *entry;
@@ -449,7 +451,7 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
 
 EXPORT_SYMBOL(elv_dispatch_add_tail);
 
-int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
+int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
 	elevator_t *e = q->elevator;
 	struct request *__rq;
@@ -481,7 +483,7 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
-void elv_merged_request(request_queue_t *q, struct request *rq, int type)
+void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
 	elevator_t *e = q->elevator;
 
@@ -494,7 +496,7 @@ void elv_merged_request(request_queue_t *q, struct request *rq, int type)
 	q->last_merge = rq;
 }
 
-void elv_merge_requests(request_queue_t *q, struct request *rq,
+void elv_merge_requests(struct request_queue *q, struct request *rq,
 			struct request *next)
 {
 	elevator_t *e = q->elevator;
@@ -509,7 +511,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 	q->last_merge = rq;
 }
 
-void elv_requeue_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(struct request_queue *q, struct request *rq)
 {
 	/*
 	 * it already went through dequeue, we need to decrement the
@@ -526,7 +528,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
-static void elv_drain_elevator(request_queue_t *q)
+static void elv_drain_elevator(struct request_queue *q)
 {
 	static int printed;
 	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
@@ -540,7 +542,7 @@ static void elv_drain_elevator(request_queue_t *q)
 	}
 }
 
-void elv_insert(request_queue_t *q, struct request *rq, int where)
+void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
@@ -638,7 +640,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 	}
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		       int plug)
 {
 	if (q->ordcolor)
@@ -676,7 +678,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 
 EXPORT_SYMBOL(__elv_add_request);
 
-void elv_add_request(request_queue_t *q, struct request *rq, int where,
+void elv_add_request(struct request_queue *q, struct request *rq, int where,
 		     int plug)
 {
 	unsigned long flags;
@@ -688,7 +690,7 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
 
 EXPORT_SYMBOL(elv_add_request);
 
-static inline struct request *__elv_next_request(request_queue_t *q)
+static inline struct request *__elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
 
@@ -704,7 +706,7 @@ static inline struct request *__elv_next_request(request_queue_t *q)
 	}
 }
 
-struct request *elv_next_request(request_queue_t *q)
+struct request *elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
 	int ret;
@@ -770,7 +772,7 @@ struct request *elv_next_request(request_queue_t *q)
 
 EXPORT_SYMBOL(elv_next_request);
 
-void elv_dequeue_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
 {
 	BUG_ON(list_empty(&rq->queuelist));
 	BUG_ON(ELV_ON_HASH(rq));
@@ -788,7 +790,7 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(elv_dequeue_request);
 
-int elv_queue_empty(request_queue_t *q)
+int elv_queue_empty(struct request_queue *q)
 {
 	elevator_t *e = q->elevator;
 
@@ -803,7 +805,7 @@ int elv_queue_empty(request_queue_t *q)
 
 EXPORT_SYMBOL(elv_queue_empty);
 
-struct request *elv_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -812,7 +814,7 @@ struct request *elv_latter_request(request_queue_t *q, struct request *rq)
 	return NULL;
 }
 
-struct request *elv_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_former_request(struct request_queue *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -821,7 +823,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
 	return NULL;
 }
 
-int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
 	elevator_t *e = q->elevator;
 
@@ -832,7 +834,7 @@ int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	return 0;
 }
 
-void elv_put_request(request_queue_t *q, struct request *rq)
+void elv_put_request(struct request_queue *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -840,7 +842,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
 		e->ops->elevator_put_req_fn(rq);
 }
 
-int elv_may_queue(request_queue_t *q, int rw)
+int elv_may_queue(struct request_queue *q, int rw)
 {
 	elevator_t *e = q->elevator;
 
@@ -850,7 +852,7 @@ int elv_may_queue(request_queue_t *q, int rw)
 	return ELV_MQUEUE_MAY;
 }
 
-void elv_completed_request(request_queue_t *q, struct request *rq)
+void elv_completed_request(struct request_queue *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -1006,7 +1008,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  * need for the new one. this way we have a chance of going back to the old
  * one, if the new one fails init for some reason.
  */
-static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
 	elevator_t *old_elevator, *e;
 	void *data;
@@ -1078,7 +1080,8 @@ fail_register:
 	return 0;
 }
 
-ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+			  size_t count)
 {
 	char elevator_name[ELV_NAME_MAX];
 	size_t len;
@@ -1107,7 +1110,7 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
 	return count;
 }
 
-ssize_t elv_iosched_show(request_queue_t *q, char *name)
+ssize_t elv_iosched_show(struct request_queue *q, char *name)
 {
 	elevator_t *e = q->elevator;
 	struct elevator_type *elv = e->elevator_type;
@@ -1127,7 +1130,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 	return len;
 }
 
-struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_former_request(struct request_queue *q,
+				      struct request *rq)
 {
 	struct rb_node *rbprev = rb_prev(&rq->rb_node);
 
@@ -1139,7 +1143,8 @@ struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(elv_rb_former_request);
 
-struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_latter_request(struct request_queue *q,
+				      struct request *rq)
 {
 	struct rb_node *rbnext = rb_next(&rq->rb_node);
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 66056ca5e63..8c2caff87cc 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -40,7 +40,7 @@ static void blk_unplug_work(struct work_struct *work);
40static void blk_unplug_timeout(unsigned long data); 40static void blk_unplug_timeout(unsigned long data);
41static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); 41static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
42static void init_request_from_bio(struct request *req, struct bio *bio); 42static void init_request_from_bio(struct request *req, struct bio *bio);
43static int __make_request(request_queue_t *q, struct bio *bio); 43static int __make_request(struct request_queue *q, struct bio *bio);
44static struct io_context *current_io_context(gfp_t gfp_flags, int node); 44static struct io_context *current_io_context(gfp_t gfp_flags, int node);
45 45
46/* 46/*
@@ -121,7 +121,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
121struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 121struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
122{ 122{
123 struct backing_dev_info *ret = NULL; 123 struct backing_dev_info *ret = NULL;
124 request_queue_t *q = bdev_get_queue(bdev); 124 struct request_queue *q = bdev_get_queue(bdev);
125 125
126 if (q) 126 if (q)
127 ret = &q->backing_dev_info; 127 ret = &q->backing_dev_info;
@@ -140,7 +140,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info);
140 * cdb from the request data for instance. 140 * cdb from the request data for instance.
141 * 141 *
142 */ 142 */
143void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn) 143void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
144{ 144{
145 q->prep_rq_fn = pfn; 145 q->prep_rq_fn = pfn;
146} 146}
@@ -163,14 +163,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq);
163 * no merge_bvec_fn is defined for a queue, and only the fixed limits are 163 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
164 * honored. 164 * honored.
165 */ 165 */
166void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn) 166void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
167{ 167{
168 q->merge_bvec_fn = mbfn; 168 q->merge_bvec_fn = mbfn;
169} 169}
170 170
171EXPORT_SYMBOL(blk_queue_merge_bvec); 171EXPORT_SYMBOL(blk_queue_merge_bvec);
172 172
173void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn) 173void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
174{ 174{
175 q->softirq_done_fn = fn; 175 q->softirq_done_fn = fn;
176} 176}
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
199 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling 199 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
200 * blk_queue_bounce() to create a buffer in normal memory. 200 * blk_queue_bounce() to create a buffer in normal memory.
201 **/ 201 **/
202void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) 202void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
203{ 203{
204 /* 204 /*
205 * set defaults 205 * set defaults
@@ -235,7 +235,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
235 235
236EXPORT_SYMBOL(blk_queue_make_request); 236EXPORT_SYMBOL(blk_queue_make_request);
237 237
238static void rq_init(request_queue_t *q, struct request *rq) 238static void rq_init(struct request_queue *q, struct request *rq)
239{ 239{
240 INIT_LIST_HEAD(&rq->queuelist); 240 INIT_LIST_HEAD(&rq->queuelist);
241 INIT_LIST_HEAD(&rq->donelist); 241 INIT_LIST_HEAD(&rq->donelist);
@@ -272,7 +272,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
272 * feature should call this function and indicate so. 272 * feature should call this function and indicate so.
273 * 273 *
274 **/ 274 **/
275int blk_queue_ordered(request_queue_t *q, unsigned ordered, 275int blk_queue_ordered(struct request_queue *q, unsigned ordered,
276 prepare_flush_fn *prepare_flush_fn) 276 prepare_flush_fn *prepare_flush_fn)
277{ 277{
278 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && 278 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
@@ -311,7 +311,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
311 * to the block layer by defining it through this call. 311 * to the block layer by defining it through this call.
312 * 312 *
313 **/ 313 **/
314void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff) 314void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
315{ 315{
316 q->issue_flush_fn = iff; 316 q->issue_flush_fn = iff;
317} 317}
@@ -321,7 +321,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
321/* 321/*
322 * Cache flushing for ordered writes handling 322 * Cache flushing for ordered writes handling
323 */ 323 */
324inline unsigned blk_ordered_cur_seq(request_queue_t *q) 324inline unsigned blk_ordered_cur_seq(struct request_queue *q)
325{ 325{
326 if (!q->ordseq) 326 if (!q->ordseq)
327 return 0; 327 return 0;
@@ -330,7 +330,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q)
330 330
331unsigned blk_ordered_req_seq(struct request *rq) 331unsigned blk_ordered_req_seq(struct request *rq)
332{ 332{
333 request_queue_t *q = rq->q; 333 struct request_queue *q = rq->q;
334 334
335 BUG_ON(q->ordseq == 0); 335 BUG_ON(q->ordseq == 0);
336 336
@@ -357,7 +357,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
357 return QUEUE_ORDSEQ_DONE; 357 return QUEUE_ORDSEQ_DONE;
358} 358}
359 359
360void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error) 360void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
361{ 361{
362 struct request *rq; 362 struct request *rq;
363 int uptodate; 363 int uptodate;
@@ -401,7 +401,7 @@ static void post_flush_end_io(struct request *rq, int error)
401 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); 401 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
402} 402}
403 403
404static void queue_flush(request_queue_t *q, unsigned which) 404static void queue_flush(struct request_queue *q, unsigned which)
405{ 405{
406 struct request *rq; 406 struct request *rq;
407 rq_end_io_fn *end_io; 407 rq_end_io_fn *end_io;
@@ -425,7 +425,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
425 elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 425 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
426} 426}
427 427
428static inline struct request *start_ordered(request_queue_t *q, 428static inline struct request *start_ordered(struct request_queue *q,
429 struct request *rq) 429 struct request *rq)
430{ 430{
431 q->bi_size = 0; 431 q->bi_size = 0;
@@ -476,7 +476,7 @@ static inline struct request *start_ordered(request_queue_t *q,
476 return rq; 476 return rq;
477} 477}
478 478
479int blk_do_ordered(request_queue_t *q, struct request **rqp) 479int blk_do_ordered(struct request_queue *q, struct request **rqp)
480{ 480{
481 struct request *rq = *rqp; 481 struct request *rq = *rqp;
482 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); 482 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
@@ -527,7 +527,7 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
527 527
528static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) 528static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
529{ 529{
530 request_queue_t *q = bio->bi_private; 530 struct request_queue *q = bio->bi_private;
531 531
532 /* 532 /*
533 * This is a dry run, restore bio_sector and size. We'll finish 533 * This is a dry run, restore bio_sector and size. We'll finish
@@ -551,7 +551,7 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
551static int ordered_bio_endio(struct request *rq, struct bio *bio, 551static int ordered_bio_endio(struct request *rq, struct bio *bio,
552 unsigned int nbytes, int error) 552 unsigned int nbytes, int error)
553{ 553{
554 request_queue_t *q = rq->q; 554 struct request_queue *q = rq->q;
555 bio_end_io_t *endio; 555 bio_end_io_t *endio;
556 void *private; 556 void *private;
557 557
@@ -588,7 +588,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio,
588 * blk_queue_bounce_limit to have lower memory pages allocated as bounce 588 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
589 * buffers for doing I/O to pages residing above @page. 589 * buffers for doing I/O to pages residing above @page.
590 **/ 590 **/
591void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) 591void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
592{ 592{
593 unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; 593 unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
594 int dma = 0; 594 int dma = 0;
@@ -624,7 +624,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
624 * Enables a low level driver to set an upper limit on the size of 624 * Enables a low level driver to set an upper limit on the size of
625 * received requests. 625 * received requests.
626 **/ 626 **/
627void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors) 627void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
628{ 628{
629 if ((max_sectors << 9) < PAGE_CACHE_SIZE) { 629 if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
630 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 630 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -651,7 +651,8 @@ EXPORT_SYMBOL(blk_queue_max_sectors);
651 * physical data segments in a request. This would be the largest sized 651 * physical data segments in a request. This would be the largest sized
652 * scatter list the driver could handle. 652 * scatter list the driver could handle.
653 **/ 653 **/
654void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments) 654void blk_queue_max_phys_segments(struct request_queue *q,
655 unsigned short max_segments)
655{ 656{
656 if (!max_segments) { 657 if (!max_segments) {
657 max_segments = 1; 658 max_segments = 1;
@@ -674,7 +675,8 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
674 * address/length pairs the host adapter can actually give at once 675 * address/length pairs the host adapter can actually give at once
675 * to the device. 676 * to the device.
676 **/ 677 **/
677void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments) 678void blk_queue_max_hw_segments(struct request_queue *q,
679 unsigned short max_segments)
678{ 680{
679 if (!max_segments) { 681 if (!max_segments) {
680 max_segments = 1; 682 max_segments = 1;
@@ -695,7 +697,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
695 * Enables a low level driver to set an upper limit on the size of a 697 * Enables a low level driver to set an upper limit on the size of a
696 * coalesced segment 698 * coalesced segment
697 **/ 699 **/
698void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size) 700void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
699{ 701{
700 if (max_size < PAGE_CACHE_SIZE) { 702 if (max_size < PAGE_CACHE_SIZE) {
701 max_size = PAGE_CACHE_SIZE; 703 max_size = PAGE_CACHE_SIZE;
@@ -718,7 +720,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
718 * even internal read-modify-write operations). Usually the default 720 * even internal read-modify-write operations). Usually the default
719 * of 512 covers most hardware. 721 * of 512 covers most hardware.
720 **/ 722 **/
721void blk_queue_hardsect_size(request_queue_t *q, unsigned short size) 723void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
722{ 724{
723 q->hardsect_size = size; 725 q->hardsect_size = size;
724} 726}
@@ -735,7 +737,7 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
735 * @t: the stacking driver (top) 737 * @t: the stacking driver (top)
736 * @b: the underlying device (bottom) 738 * @b: the underlying device (bottom)
737 **/ 739 **/
738void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) 740void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
739{ 741{
740 /* zero is "infinity" */ 742 /* zero is "infinity" */
741 t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); 743 t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
@@ -756,7 +758,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
756 * @q: the request queue for the device 758 * @q: the request queue for the device
757 * @mask: the memory boundary mask 759 * @mask: the memory boundary mask
758 **/ 760 **/
759void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask) 761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
760{ 762{
761 if (mask < PAGE_CACHE_SIZE - 1) { 763 if (mask < PAGE_CACHE_SIZE - 1) {
762 mask = PAGE_CACHE_SIZE - 1; 764 mask = PAGE_CACHE_SIZE - 1;
@@ -778,7 +780,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
778 * this is used when building direct io requests for the queue. 780 * this is used when building direct io requests for the queue.
779 * 781 *
780 **/ 782 **/
781void blk_queue_dma_alignment(request_queue_t *q, int mask) 783void blk_queue_dma_alignment(struct request_queue *q, int mask)
782{ 784{
783 q->dma_alignment = mask; 785 q->dma_alignment = mask;
784} 786}
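
Taken together, these limit setters are usually invoked as a burst from a driver's probe routine. A sketch with purely illustrative values; the functions and signatures are exactly those converted above:

    struct request_queue *q = dev->queue;           /* hypothetical driver state */

    blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);     /* bounce highmem pages */
    blk_queue_max_sectors(q, 256);                  /* 128 KiB per request */
    blk_queue_max_phys_segments(q, 32);
    blk_queue_max_hw_segments(q, 32);
    blk_queue_max_segment_size(q, 65536);
    blk_queue_hardsect_size(q, 512);
    blk_queue_segment_boundary(q, 0xffffffff);      /* never cross 4 GiB */
    blk_queue_dma_alignment(q, 511);                /* 512-byte aligned buffers */
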
@@ -796,7 +798,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
796 * 798 *
797 * no locks need be held. 799 * no locks need be held.
798 **/ 800 **/
799struct request *blk_queue_find_tag(request_queue_t *q, int tag) 801struct request *blk_queue_find_tag(struct request_queue *q, int tag)
800{ 802{
801 return blk_map_queue_find_tag(q->queue_tags, tag); 803 return blk_map_queue_find_tag(q->queue_tags, tag);
802} 804}
@@ -840,7 +842,7 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
840 * blk_cleanup_queue() will take care of calling this function, if tagging 842 * blk_cleanup_queue() will take care of calling this function, if tagging
841 * has been used. So there's no need to call this directly. 843 * has been used. So there's no need to call this directly.
842 **/ 844 **/
843static void __blk_queue_free_tags(request_queue_t *q) 845static void __blk_queue_free_tags(struct request_queue *q)
844{ 846{
845 struct blk_queue_tag *bqt = q->queue_tags; 847 struct blk_queue_tag *bqt = q->queue_tags;
846 848
@@ -877,7 +879,7 @@ EXPORT_SYMBOL(blk_free_tags);
877 * This is used to disable tagged queuing to a device, yet leave 879 * This is used to disable tagged queuing to a device, yet leave
878 * the queue in function. 880 * the queue in function.
879 **/ 881 **/
880void blk_queue_free_tags(request_queue_t *q) 882void blk_queue_free_tags(struct request_queue *q)
881{ 883{
882 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 884 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
883} 885}
@@ -885,7 +887,7 @@ void blk_queue_free_tags(request_queue_t *q)
885EXPORT_SYMBOL(blk_queue_free_tags); 887EXPORT_SYMBOL(blk_queue_free_tags);
886 888
887static int 889static int
888init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) 890init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
889{ 891{
890 struct request **tag_index; 892 struct request **tag_index;
891 unsigned long *tag_map; 893 unsigned long *tag_map;
@@ -955,7 +957,7 @@ EXPORT_SYMBOL(blk_init_tags);
955 * @depth: the maximum queue depth supported 957 * @depth: the maximum queue depth supported
956 * @tags: the tag to use 958 * @tags: the tag to use
957 **/ 959 **/
958int blk_queue_init_tags(request_queue_t *q, int depth, 960int blk_queue_init_tags(struct request_queue *q, int depth,
959 struct blk_queue_tag *tags) 961 struct blk_queue_tag *tags)
960{ 962{
961 int rc; 963 int rc;
@@ -996,7 +998,7 @@ EXPORT_SYMBOL(blk_queue_init_tags);
996 * Notes: 998 * Notes:
997 * Must be called with the queue lock held. 999 * Must be called with the queue lock held.
998 **/ 1000 **/
999int blk_queue_resize_tags(request_queue_t *q, int new_depth) 1001int blk_queue_resize_tags(struct request_queue *q, int new_depth)
1000{ 1002{
1001 struct blk_queue_tag *bqt = q->queue_tags; 1003 struct blk_queue_tag *bqt = q->queue_tags;
1002 struct request **tag_index; 1004 struct request **tag_index;
@@ -1059,7 +1061,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
1059 * Notes: 1061 * Notes:
1060 * queue lock must be held. 1062 * queue lock must be held.
1061 **/ 1063 **/
1062void blk_queue_end_tag(request_queue_t *q, struct request *rq) 1064void blk_queue_end_tag(struct request_queue *q, struct request *rq)
1063{ 1065{
1064 struct blk_queue_tag *bqt = q->queue_tags; 1066 struct blk_queue_tag *bqt = q->queue_tags;
1065 int tag = rq->tag; 1067 int tag = rq->tag;
@@ -1111,7 +1113,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
1111 * Notes: 1113 * Notes:
1112 * queue lock must be held. 1114 * queue lock must be held.
1113 **/ 1115 **/
1114int blk_queue_start_tag(request_queue_t *q, struct request *rq) 1116int blk_queue_start_tag(struct request_queue *q, struct request *rq)
1115{ 1117{
1116 struct blk_queue_tag *bqt = q->queue_tags; 1118 struct blk_queue_tag *bqt = q->queue_tags;
1117 int tag; 1119 int tag;
@@ -1158,7 +1160,7 @@ EXPORT_SYMBOL(blk_queue_start_tag);
1158 * Notes: 1160 * Notes:
1159 * queue lock must be held. 1161 * queue lock must be held.
1160 **/ 1162 **/
1161void blk_queue_invalidate_tags(request_queue_t *q) 1163void blk_queue_invalidate_tags(struct request_queue *q)
1162{ 1164{
1163 struct blk_queue_tag *bqt = q->queue_tags; 1165 struct blk_queue_tag *bqt = q->queue_tags;
1164 struct list_head *tmp, *n; 1166 struct list_head *tmp, *n;
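
The tagging API splits across three contexts: probe (init), the request_fn (start, queue lock held), and the completion path (end, queue lock held again). A sketch; mydrv_issue_to_hw() is hypothetical:

    /* probe: let the block layer allocate a depth-32 tag map */
    if (blk_queue_init_tags(q, 32, NULL))
            goto out_cleanup_queue;

    /* request_fn, q->queue_lock held */
    while ((rq = elv_next_request(q)) != NULL) {
            if (blk_queue_start_tag(q, rq))
                    break;          /* no free tag; retry after a completion */
            mydrv_issue_to_hw(dev, rq->tag, rq);
    }

    /* completion, q->queue_lock held */
    blk_queue_end_tag(q, rq);
    end_that_request_last(rq, uptodate);
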
@@ -1205,7 +1207,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
1205 1207
1206EXPORT_SYMBOL(blk_dump_rq_flags); 1208EXPORT_SYMBOL(blk_dump_rq_flags);
1207 1209
1208void blk_recount_segments(request_queue_t *q, struct bio *bio) 1210void blk_recount_segments(struct request_queue *q, struct bio *bio)
1209{ 1211{
1210 struct bio_vec *bv, *bvprv = NULL; 1212 struct bio_vec *bv, *bvprv = NULL;
1211 int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; 1213 int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
@@ -1267,7 +1269,7 @@ new_hw_segment:
1267} 1269}
1268EXPORT_SYMBOL(blk_recount_segments); 1270EXPORT_SYMBOL(blk_recount_segments);
1269 1271
1270static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, 1272static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
1271 struct bio *nxt) 1273 struct bio *nxt)
1272{ 1274{
1273 if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) 1275 if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1288,7 +1290,7 @@ static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
1288 return 0; 1290 return 0;
1289} 1291}
1290 1292
1291static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, 1293static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
1292 struct bio *nxt) 1294 struct bio *nxt)
1293{ 1295{
1294 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 1296 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1308,7 +1310,8 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
1308 * map a request to scatterlist, return number of sg entries set up. Caller 1310 * map a request to scatterlist, return number of sg entries set up. Caller
1309 * must make sure sg can hold rq->nr_phys_segments entries 1311 * must make sure sg can hold rq->nr_phys_segments entries
1310 */ 1312 */
1311int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg) 1313int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1314 struct scatterlist *sg)
1312{ 1315{
1313 struct bio_vec *bvec, *bvprv; 1316 struct bio_vec *bvec, *bvprv;
1314 struct bio *bio; 1317 struct bio *bio;
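
blk_rq_map_sg() is what request_fn style drivers call to flatten a request into a DMA-ready scatterlist, sizing the table from rq->nr_phys_segments as the comment requires. A sketch, with a hypothetical segment bound:

    struct scatterlist sgl[MYDRV_MAX_SEGS];
    int nseg;

    BUG_ON(rq->nr_phys_segments > MYDRV_MAX_SEGS);
    nseg = blk_rq_map_sg(q, rq, sgl);
    nseg = dma_map_sg(&dev->pdev->dev, sgl, nseg,
                      rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
                                               : DMA_FROM_DEVICE);
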
@@ -1361,7 +1364,7 @@ EXPORT_SYMBOL(blk_rq_map_sg);
1361 * specific ones if so desired 1364 * specific ones if so desired
1362 */ 1365 */
1363 1366
1364static inline int ll_new_mergeable(request_queue_t *q, 1367static inline int ll_new_mergeable(struct request_queue *q,
1365 struct request *req, 1368 struct request *req,
1366 struct bio *bio) 1369 struct bio *bio)
1367{ 1370{
@@ -1382,7 +1385,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
1382 return 1; 1385 return 1;
1383} 1386}
1384 1387
1385static inline int ll_new_hw_segment(request_queue_t *q, 1388static inline int ll_new_hw_segment(struct request_queue *q,
1386 struct request *req, 1389 struct request *req,
1387 struct bio *bio) 1390 struct bio *bio)
1388{ 1391{
@@ -1406,7 +1409,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
1406 return 1; 1409 return 1;
1407} 1410}
1408 1411
1409int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) 1412int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
1410{ 1413{
1411 unsigned short max_sectors; 1414 unsigned short max_sectors;
1412 int len; 1415 int len;
@@ -1444,7 +1447,7 @@ int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
1444} 1447}
1445EXPORT_SYMBOL(ll_back_merge_fn); 1448EXPORT_SYMBOL(ll_back_merge_fn);
1446 1449
1447static int ll_front_merge_fn(request_queue_t *q, struct request *req, 1450static int ll_front_merge_fn(struct request_queue *q, struct request *req,
1448 struct bio *bio) 1451 struct bio *bio)
1449{ 1452{
1450 unsigned short max_sectors; 1453 unsigned short max_sectors;
@@ -1483,7 +1486,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
1483 return ll_new_hw_segment(q, req, bio); 1486 return ll_new_hw_segment(q, req, bio);
1484} 1487}
1485 1488
1486static int ll_merge_requests_fn(request_queue_t *q, struct request *req, 1489static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
1487 struct request *next) 1490 struct request *next)
1488{ 1491{
1489 int total_phys_segments; 1492 int total_phys_segments;
@@ -1539,7 +1542,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
1539 * This is called with interrupts off and no requests on the queue and 1542 * This is called with interrupts off and no requests on the queue and
1540 * with the queue lock held. 1543 * with the queue lock held.
1541 */ 1544 */
1542void blk_plug_device(request_queue_t *q) 1545void blk_plug_device(struct request_queue *q)
1543{ 1546{
1544 WARN_ON(!irqs_disabled()); 1547 WARN_ON(!irqs_disabled());
1545 1548
@@ -1562,7 +1565,7 @@ EXPORT_SYMBOL(blk_plug_device);
1562 * remove the queue from the plugged list, if present. called with 1565 * remove the queue from the plugged list, if present. called with
1563 * queue lock held and interrupts disabled. 1566 * queue lock held and interrupts disabled.
1564 */ 1567 */
1565int blk_remove_plug(request_queue_t *q) 1568int blk_remove_plug(struct request_queue *q)
1566{ 1569{
1567 WARN_ON(!irqs_disabled()); 1570 WARN_ON(!irqs_disabled());
1568 1571
@@ -1578,7 +1581,7 @@ EXPORT_SYMBOL(blk_remove_plug);
1578/* 1581/*
1579 * remove the plug and let it rip.. 1582 * remove the plug and let it rip..
1580 */ 1583 */
1581void __generic_unplug_device(request_queue_t *q) 1584void __generic_unplug_device(struct request_queue *q)
1582{ 1585{
1583 if (unlikely(blk_queue_stopped(q))) 1586 if (unlikely(blk_queue_stopped(q)))
1584 return; 1587 return;
@@ -1592,7 +1595,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
1592 1595
1593/** 1596/**
1594 * generic_unplug_device - fire a request queue 1597 * generic_unplug_device - fire a request queue
1595 * @q: The &request_queue_t in question 1598 * @q: The &struct request_queue in question
1596 * 1599 *
1597 * Description: 1600 * Description:
1598 * Linux uses plugging to build bigger request queues before letting 1601 * Linux uses plugging to build bigger request queues before letting
@@ -1601,7 +1604,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
1601 * gets unplugged, the request_fn defined for the queue is invoked and 1604 * gets unplugged, the request_fn defined for the queue is invoked and
1602 * transfers started. 1605 * transfers started.
1603 **/ 1606 **/
1604void generic_unplug_device(request_queue_t *q) 1607void generic_unplug_device(struct request_queue *q)
1605{ 1608{
1606 spin_lock_irq(q->queue_lock); 1609 spin_lock_irq(q->queue_lock);
1607 __generic_unplug_device(q); 1610 __generic_unplug_device(q);
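
Stacking drivers such as md and dm forward unplugs to the queues underneath them. A sketch following the dm_table_unplug_all() pattern; the mydrv fields are hypothetical:

    static void mydrv_unplug(struct request_queue *q)
    {
            struct mydrv *dev = q->queuedata;
            struct request_queue *lower = bdev_get_queue(dev->backing_bdev);

            if (lower->unplug_fn)
                    lower->unplug_fn(lower);
    }
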
@@ -1612,7 +1615,7 @@ EXPORT_SYMBOL(generic_unplug_device);
1612static void blk_backing_dev_unplug(struct backing_dev_info *bdi, 1615static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1613 struct page *page) 1616 struct page *page)
1614{ 1617{
1615 request_queue_t *q = bdi->unplug_io_data; 1618 struct request_queue *q = bdi->unplug_io_data;
1616 1619
1617 /* 1620 /*
1618 * devices don't necessarily have an ->unplug_fn defined 1621 * devices don't necessarily have an ->unplug_fn defined
@@ -1627,7 +1630,8 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1627 1630
1628static void blk_unplug_work(struct work_struct *work) 1631static void blk_unplug_work(struct work_struct *work)
1629{ 1632{
1630 request_queue_t *q = container_of(work, request_queue_t, unplug_work); 1633 struct request_queue *q =
1634 container_of(work, struct request_queue, unplug_work);
1631 1635
1632 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, 1636 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
1633 q->rq.count[READ] + q->rq.count[WRITE]); 1637 q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1637,7 +1641,7 @@ static void blk_unplug_work(struct work_struct *work)
1637 1641
1638static void blk_unplug_timeout(unsigned long data) 1642static void blk_unplug_timeout(unsigned long data)
1639{ 1643{
1640 request_queue_t *q = (request_queue_t *)data; 1644 struct request_queue *q = (struct request_queue *)data;
1641 1645
1642 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, 1646 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
1643 q->rq.count[READ] + q->rq.count[WRITE]); 1647 q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1647,14 +1651,14 @@ static void blk_unplug_timeout(unsigned long data)
1647 1651
1648/** 1652/**
1649 * blk_start_queue - restart a previously stopped queue 1653 * blk_start_queue - restart a previously stopped queue
1650 * @q: The &request_queue_t in question 1654 * @q: The &struct request_queue in question
1651 * 1655 *
1652 * Description: 1656 * Description:
1653 * blk_start_queue() will clear the stop flag on the queue, and call 1657 * blk_start_queue() will clear the stop flag on the queue, and call
1654 * the request_fn for the queue if it was in a stopped state when 1658 * the request_fn for the queue if it was in a stopped state when
1655 * entered. Also see blk_stop_queue(). Queue lock must be held. 1659 * entered. Also see blk_stop_queue(). Queue lock must be held.
1656 **/ 1660 **/
1657void blk_start_queue(request_queue_t *q) 1661void blk_start_queue(struct request_queue *q)
1658{ 1662{
1659 WARN_ON(!irqs_disabled()); 1663 WARN_ON(!irqs_disabled());
1660 1664
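
blk_stop_queue() and blk_start_queue() pair up when the hardware runs out of resources: stop from the request_fn, where the queue lock is already held, and restart from the IRQ handler after retaking it. A sketch; mydrv_ring_full() is hypothetical:

    /* request_fn: q->queue_lock already held */
    if (mydrv_ring_full(dev)) {
            blk_stop_queue(q);
            return;
    }

    /* interrupt handler, once descriptors free up */
    spin_lock_irqsave(q->queue_lock, flags);
    blk_start_queue(q);
    spin_unlock_irqrestore(q->queue_lock, flags);
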
@@ -1677,7 +1681,7 @@ EXPORT_SYMBOL(blk_start_queue);
1677 1681
1678/** 1682/**
1679 * blk_stop_queue - stop a queue 1683 * blk_stop_queue - stop a queue
1680 * @q: The &request_queue_t in question 1684 * @q: The &struct request_queue in question
1681 * 1685 *
1682 * Description: 1686 * Description:
1683 * The Linux block layer assumes that a block driver will consume all 1687 * The Linux block layer assumes that a block driver will consume all
@@ -1689,7 +1693,7 @@ EXPORT_SYMBOL(blk_start_queue);
1689 * the driver has signalled it's ready to go again. This happens by calling 1693 * the driver has signalled it's ready to go again. This happens by calling
1690 * blk_start_queue() to restart queue operations. Queue lock must be held. 1694 * blk_start_queue() to restart queue operations. Queue lock must be held.
1691 **/ 1695 **/
1692void blk_stop_queue(request_queue_t *q) 1696void blk_stop_queue(struct request_queue *q)
1693{ 1697{
1694 blk_remove_plug(q); 1698 blk_remove_plug(q);
1695 set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); 1699 set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
@@ -1746,7 +1750,7 @@ void blk_run_queue(struct request_queue *q)
1746EXPORT_SYMBOL(blk_run_queue); 1750EXPORT_SYMBOL(blk_run_queue);
1747 1751
1748/** 1752/**
1749 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed 1753 * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
1750 * @kobj: the kobj belonging to the request queue to be released 1754 * @kobj: the kobj belonging to the request queue to be released
1751 * 1755 *
1752 * Description: 1756 * Description:
@@ -1762,7 +1766,8 @@ EXPORT_SYMBOL(blk_run_queue);
1762 **/ 1766 **/
1763static void blk_release_queue(struct kobject *kobj) 1767static void blk_release_queue(struct kobject *kobj)
1764{ 1768{
1765 request_queue_t *q = container_of(kobj, struct request_queue, kobj); 1769 struct request_queue *q =
1770 container_of(kobj, struct request_queue, kobj);
1766 struct request_list *rl = &q->rq; 1771 struct request_list *rl = &q->rq;
1767 1772
1768 blk_sync_queue(q); 1773 blk_sync_queue(q);
@@ -1778,13 +1783,13 @@ static void blk_release_queue(struct kobject *kobj)
1778 kmem_cache_free(requestq_cachep, q); 1783 kmem_cache_free(requestq_cachep, q);
1779} 1784}
1780 1785
1781void blk_put_queue(request_queue_t *q) 1786void blk_put_queue(struct request_queue *q)
1782{ 1787{
1783 kobject_put(&q->kobj); 1788 kobject_put(&q->kobj);
1784} 1789}
1785EXPORT_SYMBOL(blk_put_queue); 1790EXPORT_SYMBOL(blk_put_queue);
1786 1791
1787void blk_cleanup_queue(request_queue_t * q) 1792void blk_cleanup_queue(struct request_queue * q)
1788{ 1793{
1789 mutex_lock(&q->sysfs_lock); 1794 mutex_lock(&q->sysfs_lock);
1790 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); 1795 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -1798,7 +1803,7 @@ void blk_cleanup_queue(request_queue_t * q)
1798 1803
1799EXPORT_SYMBOL(blk_cleanup_queue); 1804EXPORT_SYMBOL(blk_cleanup_queue);
1800 1805
1801static int blk_init_free_list(request_queue_t *q) 1806static int blk_init_free_list(struct request_queue *q)
1802{ 1807{
1803 struct request_list *rl = &q->rq; 1808 struct request_list *rl = &q->rq;
1804 1809
@@ -1817,7 +1822,7 @@ static int blk_init_free_list(request_queue_t *q)
1817 return 0; 1822 return 0;
1818} 1823}
1819 1824
1820request_queue_t *blk_alloc_queue(gfp_t gfp_mask) 1825struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
1821{ 1826{
1822 return blk_alloc_queue_node(gfp_mask, -1); 1827 return blk_alloc_queue_node(gfp_mask, -1);
1823} 1828}
@@ -1825,9 +1830,9 @@ EXPORT_SYMBOL(blk_alloc_queue);
1825 1830
1826static struct kobj_type queue_ktype; 1831static struct kobj_type queue_ktype;
1827 1832
1828request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 1833struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1829{ 1834{
1830 request_queue_t *q; 1835 struct request_queue *q;
1831 1836
1832 q = kmem_cache_alloc_node(requestq_cachep, 1837 q = kmem_cache_alloc_node(requestq_cachep,
1833 gfp_mask | __GFP_ZERO, node_id); 1838 gfp_mask | __GFP_ZERO, node_id);
@@ -1882,16 +1887,16 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
1882 * when the block device is deactivated (such as at module unload). 1887 * when the block device is deactivated (such as at module unload).
1883 **/ 1888 **/
1884 1889
1885request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 1890struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
1886{ 1891{
1887 return blk_init_queue_node(rfn, lock, -1); 1892 return blk_init_queue_node(rfn, lock, -1);
1888} 1893}
1889EXPORT_SYMBOL(blk_init_queue); 1894EXPORT_SYMBOL(blk_init_queue);
1890 1895
1891request_queue_t * 1896struct request_queue *
1892blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 1897blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1893{ 1898{
1894 request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id); 1899 struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
1895 1900
1896 if (!q) 1901 if (!q)
1897 return NULL; 1902 return NULL;
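
The _node variants let NUMA-aware drivers place the queue near the device's memory controller. A sketch for a PCI driver, assuming the usual probe-time dev and pdev locals:

    struct request_queue *q;

    q = blk_init_queue_node(mydrv_request, &dev->lock,
                            dev_to_node(&pdev->dev));
    if (!q)
            return -ENOMEM;
    q->queuedata = dev;
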
@@ -1940,7 +1945,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1940} 1945}
1941EXPORT_SYMBOL(blk_init_queue_node); 1946EXPORT_SYMBOL(blk_init_queue_node);
1942 1947
1943int blk_get_queue(request_queue_t *q) 1948int blk_get_queue(struct request_queue *q)
1944{ 1949{
1945 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { 1950 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
1946 kobject_get(&q->kobj); 1951 kobject_get(&q->kobj);
@@ -1952,7 +1957,7 @@ int blk_get_queue(request_queue_t *q)
1952 1957
1953EXPORT_SYMBOL(blk_get_queue); 1958EXPORT_SYMBOL(blk_get_queue);
1954 1959
1955static inline void blk_free_request(request_queue_t *q, struct request *rq) 1960static inline void blk_free_request(struct request_queue *q, struct request *rq)
1956{ 1961{
1957 if (rq->cmd_flags & REQ_ELVPRIV) 1962 if (rq->cmd_flags & REQ_ELVPRIV)
1958 elv_put_request(q, rq); 1963 elv_put_request(q, rq);
@@ -1960,7 +1965,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
1960} 1965}
1961 1966
1962static struct request * 1967static struct request *
1963blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) 1968blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
1964{ 1969{
1965 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 1970 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
1966 1971
@@ -1988,7 +1993,7 @@ blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
1988 * ioc_batching returns true if the ioc is a valid batching request and 1993 * ioc_batching returns true if the ioc is a valid batching request and
1989 * should be given priority access to a request. 1994 * should be given priority access to a request.
1990 */ 1995 */
1991static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) 1996static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
1992{ 1997{
1993 if (!ioc) 1998 if (!ioc)
1994 return 0; 1999 return 0;
@@ -2009,7 +2014,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
2009 * is the behaviour we want though - once it gets a wakeup it should be given 2014 * is the behaviour we want though - once it gets a wakeup it should be given
2010 * a nice run. 2015 * a nice run.
2011 */ 2016 */
2012static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) 2017static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
2013{ 2018{
2014 if (!ioc || ioc_batching(q, ioc)) 2019 if (!ioc || ioc_batching(q, ioc))
2015 return; 2020 return;
@@ -2018,7 +2023,7 @@ static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
2018 ioc->last_waited = jiffies; 2023 ioc->last_waited = jiffies;
2019} 2024}
2020 2025
2021static void __freed_request(request_queue_t *q, int rw) 2026static void __freed_request(struct request_queue *q, int rw)
2022{ 2027{
2023 struct request_list *rl = &q->rq; 2028 struct request_list *rl = &q->rq;
2024 2029
@@ -2037,7 +2042,7 @@ static void __freed_request(request_queue_t *q, int rw)
2037 * A request has just been released. Account for it, update the full and 2042 * A request has just been released. Account for it, update the full and
2038 * congestion status, wake up any waiters. Called under q->queue_lock. 2043 * congestion status, wake up any waiters. Called under q->queue_lock.
2039 */ 2044 */
2040static void freed_request(request_queue_t *q, int rw, int priv) 2045static void freed_request(struct request_queue *q, int rw, int priv)
2041{ 2046{
2042 struct request_list *rl = &q->rq; 2047 struct request_list *rl = &q->rq;
2043 2048
@@ -2057,7 +2062,7 @@ static void freed_request(request_queue_t *q, int rw, int priv)
2057 * Returns NULL on failure, with queue_lock held. 2062 * Returns NULL on failure, with queue_lock held.
2058 * Returns !NULL on success, with queue_lock *not held*. 2063 * Returns !NULL on success, with queue_lock *not held*.
2059 */ 2064 */
2060static struct request *get_request(request_queue_t *q, int rw_flags, 2065static struct request *get_request(struct request_queue *q, int rw_flags,
2061 struct bio *bio, gfp_t gfp_mask) 2066 struct bio *bio, gfp_t gfp_mask)
2062{ 2067{
2063 struct request *rq = NULL; 2068 struct request *rq = NULL;
@@ -2162,7 +2167,7 @@ out:
2162 * 2167 *
2163 * Called with q->queue_lock held, and returns with it unlocked. 2168 * Called with q->queue_lock held, and returns with it unlocked.
2164 */ 2169 */
2165static struct request *get_request_wait(request_queue_t *q, int rw_flags, 2170static struct request *get_request_wait(struct request_queue *q, int rw_flags,
2166 struct bio *bio) 2171 struct bio *bio)
2167{ 2172{
2168 const int rw = rw_flags & 0x01; 2173 const int rw = rw_flags & 0x01;
@@ -2204,7 +2209,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags,
2204 return rq; 2209 return rq;
2205} 2210}
2206 2211
2207struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) 2212struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
2208{ 2213{
2209 struct request *rq; 2214 struct request *rq;
2210 2215
@@ -2234,7 +2239,7 @@ EXPORT_SYMBOL(blk_get_request);
2234 * 2239 *
2235 * The queue lock must be held with interrupts disabled. 2240 * The queue lock must be held with interrupts disabled.
2236 */ 2241 */
2237void blk_start_queueing(request_queue_t *q) 2242void blk_start_queueing(struct request_queue *q)
2238{ 2243{
2239 if (!blk_queue_plugged(q)) 2244 if (!blk_queue_plugged(q))
2240 q->request_fn(q); 2245 q->request_fn(q);
@@ -2253,7 +2258,7 @@ EXPORT_SYMBOL(blk_start_queueing);
2253 * more, when that condition happens we need to put the request back 2258 * more, when that condition happens we need to put the request back
2254 * on the queue. Must be called with queue lock held. 2259 * on the queue. Must be called with queue lock held.
2255 */ 2260 */
2256void blk_requeue_request(request_queue_t *q, struct request *rq) 2261void blk_requeue_request(struct request_queue *q, struct request *rq)
2257{ 2262{
2258 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); 2263 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
2259 2264
@@ -2284,7 +2289,7 @@ EXPORT_SYMBOL(blk_requeue_request);
2284 * of the queue for things like a QUEUE_FULL message from a device, or a 2289 * of the queue for things like a QUEUE_FULL message from a device, or a
2285 * host that is unable to accept a particular command. 2290 * host that is unable to accept a particular command.
2286 */ 2291 */
2287void blk_insert_request(request_queue_t *q, struct request *rq, 2292void blk_insert_request(struct request_queue *q, struct request *rq,
2288 int at_head, void *data) 2293 int at_head, void *data)
2289{ 2294{
2290 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 2295 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@@ -2330,7 +2335,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
2330 return ret; 2335 return ret;
2331} 2336}
2332 2337
2333static int __blk_rq_map_user(request_queue_t *q, struct request *rq, 2338static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
2334 void __user *ubuf, unsigned int len) 2339 void __user *ubuf, unsigned int len)
2335{ 2340{
2336 unsigned long uaddr; 2341 unsigned long uaddr;
@@ -2403,8 +2408,8 @@ unmap_bio:
2403 * original bio must be passed back in to blk_rq_unmap_user() for proper 2408 * original bio must be passed back in to blk_rq_unmap_user() for proper
2404 * unmapping. 2409 * unmapping.
2405 */ 2410 */
2406int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, 2411int blk_rq_map_user(struct request_queue *q, struct request *rq,
2407 unsigned long len) 2412 void __user *ubuf, unsigned long len)
2408{ 2413{
2409 unsigned long bytes_read = 0; 2414 unsigned long bytes_read = 0;
2410 struct bio *bio = NULL; 2415 struct bio *bio = NULL;
@@ -2470,7 +2475,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
2470 * original bio must be passed back in to blk_rq_unmap_user() for proper 2475 * original bio must be passed back in to blk_rq_unmap_user() for proper
2471 * unmapping. 2476 * unmapping.
2472 */ 2477 */
2473int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, 2478int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
2474 struct sg_iovec *iov, int iov_count, unsigned int len) 2479 struct sg_iovec *iov, int iov_count, unsigned int len)
2475{ 2480{
2476 struct bio *bio; 2481 struct bio *bio;
@@ -2540,7 +2545,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
2540 * @len: length of user data 2545 * @len: length of user data
2541 * @gfp_mask: memory allocation flags 2546 * @gfp_mask: memory allocation flags
2542 */ 2547 */
2543int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, 2548int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
2544 unsigned int len, gfp_t gfp_mask) 2549 unsigned int len, gfp_t gfp_mask)
2545{ 2550{
2546 struct bio *bio; 2551 struct bio *bio;
@@ -2577,7 +2582,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
2577 * Insert a fully prepared request at the back of the io scheduler queue 2582 * Insert a fully prepared request at the back of the io scheduler queue
2578 * for execution. Don't wait for completion. 2583 * for execution. Don't wait for completion.
2579 */ 2584 */
2580void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, 2585void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
2581 struct request *rq, int at_head, 2586 struct request *rq, int at_head,
2582 rq_end_io_fn *done) 2587 rq_end_io_fn *done)
2583{ 2588{
@@ -2605,7 +2610,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
2605 * Insert a fully prepared request at the back of the io scheduler queue 2610 * Insert a fully prepared request at the back of the io scheduler queue
2606 * for execution and wait for completion. 2611 * for execution and wait for completion.
2607 */ 2612 */
2608int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, 2613int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
2609 struct request *rq, int at_head) 2614 struct request *rq, int at_head)
2610{ 2615{
2611 DECLARE_COMPLETION_ONSTACK(wait); 2616 DECLARE_COMPLETION_ONSTACK(wait);
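
These two execute helpers pair with blk_get_request() for passthrough commands: blk_execute_rq() sleeps until completion, while the _nowait variant takes a done callback. A sketch; the opcode is hypothetical:

    struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
    int err;

    rq->cmd_type = REQ_TYPE_BLOCK_PC;
    rq->cmd[0] = MYDRV_INQUIRY;             /* hypothetical CDB */
    rq->cmd_len = 6;
    rq->timeout = 10 * HZ;

    err = blk_execute_rq(q, disk, rq, 0);   /* 0 = queue at the back */
    blk_put_request(rq);
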
@@ -2648,7 +2653,7 @@ EXPORT_SYMBOL(blk_execute_rq);
2648 */ 2653 */
2649int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) 2654int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2650{ 2655{
2651 request_queue_t *q; 2656 struct request_queue *q;
2652 2657
2653 if (bdev->bd_disk == NULL) 2658 if (bdev->bd_disk == NULL)
2654 return -ENXIO; 2659 return -ENXIO;
@@ -2684,7 +2689,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2684 * queue lock is held and interrupts disabled, as we muck with the 2689 * queue lock is held and interrupts disabled, as we muck with the
2685 * request queue list. 2690 * request queue list.
2686 */ 2691 */
2687static inline void add_request(request_queue_t * q, struct request * req) 2692static inline void add_request(struct request_queue * q, struct request * req)
2688{ 2693{
2689 drive_stat_acct(req, req->nr_sectors, 1); 2694 drive_stat_acct(req, req->nr_sectors, 1);
2690 2695
@@ -2730,7 +2735,7 @@ EXPORT_SYMBOL_GPL(disk_round_stats);
2730/* 2735/*
2731 * queue lock must be held 2736 * queue lock must be held
2732 */ 2737 */
2733void __blk_put_request(request_queue_t *q, struct request *req) 2738void __blk_put_request(struct request_queue *q, struct request *req)
2734{ 2739{
2735 if (unlikely(!q)) 2740 if (unlikely(!q))
2736 return; 2741 return;
@@ -2760,7 +2765,7 @@ EXPORT_SYMBOL_GPL(__blk_put_request);
2760void blk_put_request(struct request *req) 2765void blk_put_request(struct request *req)
2761{ 2766{
2762 unsigned long flags; 2767 unsigned long flags;
2763 request_queue_t *q = req->q; 2768 struct request_queue *q = req->q;
2764 2769
2765 /* 2770 /*
2766 * Gee, IDE calls in w/ NULL q. Fix IDE and remove the 2771 * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
@@ -2798,7 +2803,7 @@ EXPORT_SYMBOL(blk_end_sync_rq);
2798/* 2803/*
2799 * Has to be called with the request spinlock acquired 2804 * Has to be called with the request spinlock acquired
2800 */ 2805 */
2801static int attempt_merge(request_queue_t *q, struct request *req, 2806static int attempt_merge(struct request_queue *q, struct request *req,
2802 struct request *next) 2807 struct request *next)
2803{ 2808{
2804 if (!rq_mergeable(req) || !rq_mergeable(next)) 2809 if (!rq_mergeable(req) || !rq_mergeable(next))
@@ -2851,7 +2856,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
2851 return 1; 2856 return 1;
2852} 2857}
2853 2858
2854static inline int attempt_back_merge(request_queue_t *q, struct request *rq) 2859static inline int attempt_back_merge(struct request_queue *q,
2860 struct request *rq)
2855{ 2861{
2856 struct request *next = elv_latter_request(q, rq); 2862 struct request *next = elv_latter_request(q, rq);
2857 2863
@@ -2861,7 +2867,8 @@ static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
2861 return 0; 2867 return 0;
2862} 2868}
2863 2869
2864static inline int attempt_front_merge(request_queue_t *q, struct request *rq) 2870static inline int attempt_front_merge(struct request_queue *q,
2871 struct request *rq)
2865{ 2872{
2866 struct request *prev = elv_former_request(q, rq); 2873 struct request *prev = elv_former_request(q, rq);
2867 2874
@@ -2905,7 +2912,7 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
2905 req->start_time = jiffies; 2912 req->start_time = jiffies;
2906} 2913}
2907 2914
2908static int __make_request(request_queue_t *q, struct bio *bio) 2915static int __make_request(struct request_queue *q, struct bio *bio)
2909{ 2916{
2910 struct request *req; 2917 struct request *req;
2911 int el_ret, nr_sectors, barrier, err; 2918 int el_ret, nr_sectors, barrier, err;
@@ -3119,7 +3126,7 @@ static inline int should_fail_request(struct bio *bio)
3119 */ 3126 */
3120static inline void __generic_make_request(struct bio *bio) 3127static inline void __generic_make_request(struct bio *bio)
3121{ 3128{
3122 request_queue_t *q; 3129 struct request_queue *q;
3123 sector_t maxsector; 3130 sector_t maxsector;
3124 sector_t old_sector; 3131 sector_t old_sector;
3125 int ret, nr_sectors = bio_sectors(bio); 3132 int ret, nr_sectors = bio_sectors(bio);
@@ -3312,7 +3319,7 @@ static void blk_recalc_rq_segments(struct request *rq)
3312 struct bio *bio, *prevbio = NULL; 3319 struct bio *bio, *prevbio = NULL;
3313 int nr_phys_segs, nr_hw_segs; 3320 int nr_phys_segs, nr_hw_segs;
3314 unsigned int phys_size, hw_size; 3321 unsigned int phys_size, hw_size;
3315 request_queue_t *q = rq->q; 3322 struct request_queue *q = rq->q;
3316 3323
3317 if (!rq->bio) 3324 if (!rq->bio)
3318 return; 3325 return;
@@ -3658,7 +3665,8 @@ void end_request(struct request *req, int uptodate)
3658 3665
3659EXPORT_SYMBOL(end_request); 3666EXPORT_SYMBOL(end_request);
3660 3667
3661void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) 3668void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
3669 struct bio *bio)
3662{ 3670{
3663 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ 3671 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
3664 rq->cmd_flags |= (bio->bi_rw & 3); 3672 rq->cmd_flags |= (bio->bi_rw & 3);
@@ -3701,7 +3709,7 @@ int __init blk_dev_init(void)
3701 sizeof(struct request), 0, SLAB_PANIC, NULL); 3709 sizeof(struct request), 0, SLAB_PANIC, NULL);
3702 3710
3703 requestq_cachep = kmem_cache_create("blkdev_queue", 3711 requestq_cachep = kmem_cache_create("blkdev_queue",
3704 sizeof(request_queue_t), 0, SLAB_PANIC, NULL); 3712 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3705 3713
3706 iocontext_cachep = kmem_cache_create("blkdev_ioc", 3714 iocontext_cachep = kmem_cache_create("blkdev_ioc",
3707 sizeof(struct io_context), 0, SLAB_PANIC, NULL); 3715 sizeof(struct io_context), 0, SLAB_PANIC, NULL);
@@ -4021,7 +4029,8 @@ static ssize_t
4021queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4029queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4022{ 4030{
4023 struct queue_sysfs_entry *entry = to_queue(attr); 4031 struct queue_sysfs_entry *entry = to_queue(attr);
4024 request_queue_t *q = container_of(kobj, struct request_queue, kobj); 4032 struct request_queue *q =
4033 container_of(kobj, struct request_queue, kobj);
4025 ssize_t res; 4034 ssize_t res;
4026 4035
4027 if (!entry->show) 4036 if (!entry->show)
@@ -4041,7 +4050,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
4041 const char *page, size_t length) 4050 const char *page, size_t length)
4042{ 4051{
4043 struct queue_sysfs_entry *entry = to_queue(attr); 4052 struct queue_sysfs_entry *entry = to_queue(attr);
4044 request_queue_t *q = container_of(kobj, struct request_queue, kobj); 4053 struct request_queue *q = container_of(kobj, struct request_queue, kobj);
4045 4054
4046 ssize_t res; 4055 ssize_t res;
4047 4056
@@ -4072,7 +4081,7 @@ int blk_register_queue(struct gendisk *disk)
4072{ 4081{
4073 int ret; 4082 int ret;
4074 4083
4075 request_queue_t *q = disk->queue; 4084 struct request_queue *q = disk->queue;
4076 4085
4077 if (!q || !q->request_fn) 4086 if (!q || !q->request_fn)
4078 return -ENXIO; 4087 return -ENXIO;
@@ -4097,7 +4106,7 @@ int blk_register_queue(struct gendisk *disk)
4097 4106
4098void blk_unregister_queue(struct gendisk *disk) 4107void blk_unregister_queue(struct gendisk *disk)
4099{ 4108{
4100 request_queue_t *q = disk->queue; 4109 struct request_queue *q = disk->queue;
4101 4110
4102 if (q && q->request_fn) { 4111 if (q && q->request_fn) {
4103 elv_unregister_queue(q); 4112 elv_unregister_queue(q);
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 1c3de2b9a6b..7563d8aa394 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -11,13 +11,13 @@ struct noop_data {
11 struct list_head queue; 11 struct list_head queue;
12}; 12};
13 13
14static void noop_merged_requests(request_queue_t *q, struct request *rq, 14static void noop_merged_requests(struct request_queue *q, struct request *rq,
15 struct request *next) 15 struct request *next)
16{ 16{
17 list_del_init(&next->queuelist); 17 list_del_init(&next->queuelist);
18} 18}
19 19
20static int noop_dispatch(request_queue_t *q, int force) 20static int noop_dispatch(struct request_queue *q, int force)
21{ 21{
22 struct noop_data *nd = q->elevator->elevator_data; 22 struct noop_data *nd = q->elevator->elevator_data;
23 23
@@ -31,14 +31,14 @@ static int noop_dispatch(request_queue_t *q, int force)
31 return 0; 31 return 0;
32} 32}
33 33
34static void noop_add_request(request_queue_t *q, struct request *rq) 34static void noop_add_request(struct request_queue *q, struct request *rq)
35{ 35{
36 struct noop_data *nd = q->elevator->elevator_data; 36 struct noop_data *nd = q->elevator->elevator_data;
37 37
38 list_add_tail(&rq->queuelist, &nd->queue); 38 list_add_tail(&rq->queuelist, &nd->queue);
39} 39}
40 40
41static int noop_queue_empty(request_queue_t *q) 41static int noop_queue_empty(struct request_queue *q)
42{ 42{
43 struct noop_data *nd = q->elevator->elevator_data; 43 struct noop_data *nd = q->elevator->elevator_data;
44 44
@@ -46,7 +46,7 @@ static int noop_queue_empty(request_queue_t *q)
46} 46}
47 47
48static struct request * 48static struct request *
49noop_former_request(request_queue_t *q, struct request *rq) 49noop_former_request(struct request_queue *q, struct request *rq)
50{ 50{
51 struct noop_data *nd = q->elevator->elevator_data; 51 struct noop_data *nd = q->elevator->elevator_data;
52 52
@@ -56,7 +56,7 @@ noop_former_request(request_queue_t *q, struct request *rq)
56} 56}
57 57
58static struct request * 58static struct request *
59noop_latter_request(request_queue_t *q, struct request *rq) 59noop_latter_request(struct request_queue *q, struct request *rq)
60{ 60{
61 struct noop_data *nd = q->elevator->elevator_data; 61 struct noop_data *nd = q->elevator->elevator_data;
62 62
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq)
65 return list_entry(rq->queuelist.next, struct request, queuelist); 65 return list_entry(rq->queuelist.next, struct request, queuelist);
66} 66}
67 67
68static void *noop_init_queue(request_queue_t *q) 68static void *noop_init_queue(struct request_queue *q)
69{ 69{
70 struct noop_data *nd; 70 struct noop_data *nd;
71 71
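
For context, the callbacks renamed above are consumed through the file's ops table, which this patch does not touch. Reproduced from memory of the 2.6.22-era file, so treat the field names as recalled rather than quoted:

    static struct elevator_type elevator_noop = {
            .ops = {
                    .elevator_merge_req_fn   = noop_merged_requests,
                    .elevator_dispatch_fn    = noop_dispatch,
                    .elevator_add_req_fn     = noop_add_request,
                    .elevator_queue_empty_fn = noop_queue_empty,
                    .elevator_former_req_fn  = noop_former_request,
                    .elevator_latter_req_fn  = noop_latter_request,
                    .elevator_init_fn        = noop_init_queue,
                    .elevator_exit_fn        = noop_exit_queue,
            },
            .elevator_name = "noop",
            .elevator_owner = THIS_MODULE,
    };
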
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index d359a715bbc..91c73224f4c 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -49,22 +49,22 @@ static int sg_get_version(int __user *p)
49 return put_user(sg_version_num, p); 49 return put_user(sg_version_num, p);
50} 50}
51 51
52static int scsi_get_idlun(request_queue_t *q, int __user *p) 52static int scsi_get_idlun(struct request_queue *q, int __user *p)
53{ 53{
54 return put_user(0, p); 54 return put_user(0, p);
55} 55}
56 56
57static int scsi_get_bus(request_queue_t *q, int __user *p) 57static int scsi_get_bus(struct request_queue *q, int __user *p)
58{ 58{
59 return put_user(0, p); 59 return put_user(0, p);
60} 60}
61 61
62static int sg_get_timeout(request_queue_t *q) 62static int sg_get_timeout(struct request_queue *q)
63{ 63{
64 return q->sg_timeout / (HZ / USER_HZ); 64 return q->sg_timeout / (HZ / USER_HZ);
65} 65}
66 66
67static int sg_set_timeout(request_queue_t *q, int __user *p) 67static int sg_set_timeout(struct request_queue *q, int __user *p)
68{ 68{
69 int timeout, err = get_user(timeout, p); 69 int timeout, err = get_user(timeout, p);
70 70
@@ -74,14 +74,14 @@ static int sg_set_timeout(request_queue_t *q, int __user *p)
74 return err; 74 return err;
75} 75}
76 76
77static int sg_get_reserved_size(request_queue_t *q, int __user *p) 77static int sg_get_reserved_size(struct request_queue *q, int __user *p)
78{ 78{
79 unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); 79 unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
80 80
81 return put_user(val, p); 81 return put_user(val, p);
82} 82}
83 83
84static int sg_set_reserved_size(request_queue_t *q, int __user *p) 84static int sg_set_reserved_size(struct request_queue *q, int __user *p)
85{ 85{
86 int size, err = get_user(size, p); 86 int size, err = get_user(size, p);
87 87
@@ -101,7 +101,7 @@ static int sg_set_reserved_size(request_queue_t *q, int __user *p)
101 * will always return that we are ATAPI even for a real SCSI drive, I'm not 101 * will always return that we are ATAPI even for a real SCSI drive, I'm not
102 * so sure this is worth doing anything about (why would you care??) 102 * so sure this is worth doing anything about (why would you care??)
103 */ 103 */
104static int sg_emulated_host(request_queue_t *q, int __user *p) 104static int sg_emulated_host(struct request_queue *q, int __user *p)
105{ 105{
106 return put_user(1, p); 106 return put_user(1, p);
107} 107}
@@ -214,7 +214,7 @@ int blk_verify_command(unsigned char *cmd, int has_write_perm)
214} 214}
215EXPORT_SYMBOL_GPL(blk_verify_command); 215EXPORT_SYMBOL_GPL(blk_verify_command);
216 216
217static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq, 217static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
218 struct sg_io_hdr *hdr, int has_write_perm) 218 struct sg_io_hdr *hdr, int has_write_perm)
219{ 219{
220 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ 220 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -286,7 +286,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
286 return r; 286 return r;
287} 287}
288 288
289static int sg_io(struct file *file, request_queue_t *q, 289static int sg_io(struct file *file, struct request_queue *q,
290 struct gendisk *bd_disk, struct sg_io_hdr *hdr) 290 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
291{ 291{
292 unsigned long start_time; 292 unsigned long start_time;
@@ -519,7 +519,8 @@ error:
519EXPORT_SYMBOL_GPL(sg_scsi_ioctl); 519EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
520 520
521/* Send basic block requests */ 521/* Send basic block requests */
522static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data) 522static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
523 int cmd, int data)
523{ 524{
524 struct request *rq; 525 struct request *rq;
525 int err; 526 int err;
@@ -539,7 +540,8 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
539 return err; 540 return err;
540} 541}
541 542
542static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data) 543static inline int blk_send_start_stop(struct request_queue *q,
544 struct gendisk *bd_disk, int data)
543{ 545{
544 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); 546 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
545} 547}
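
The sg_io() path above is what userspace reaches through the SG_IO ioctl. A minimal, self-contained caller; the device path and CDB are illustrative:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <scsi/sg.h>

    int test_unit_ready(const char *dev)    /* e.g. "/dev/sg0" */
    {
            unsigned char cdb[6] = { 0 };   /* TEST UNIT READY */
            struct sg_io_hdr hdr;
            int fd = open(dev, O_RDONLY), ret;

            if (fd < 0)
                    return -1;
            memset(&hdr, 0, sizeof(hdr));
            hdr.interface_id = 'S';
            hdr.cmd_len = sizeof(cdb);
            hdr.cmdp = cdb;
            hdr.dxfer_direction = SG_DXFER_NONE;
            hdr.timeout = 5000;             /* milliseconds */
            ret = ioctl(fd, SG_IO, &hdr);
            close(fd);
            return ret;
    }
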
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index 423ed08fb6f..d7e18ce8dad 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -372,7 +372,7 @@ static int fd_test_drive_present(int drive);
372static void config_types(void); 372static void config_types(void);
373static int floppy_open(struct inode *inode, struct file *filp); 373static int floppy_open(struct inode *inode, struct file *filp);
374static int floppy_release(struct inode *inode, struct file *filp); 374static int floppy_release(struct inode *inode, struct file *filp);
375static void do_fd_request(request_queue_t *); 375static void do_fd_request(struct request_queue *);
376 376
377/************************* End of Prototypes **************************/ 377/************************* End of Prototypes **************************/
378 378
@@ -1271,7 +1271,7 @@ static void fd1772_checkint(void)
1271 } 1271 }
1272} 1272}
1273 1273
1274static void do_fd_request(request_queue_t* q) 1274static void do_fd_request(struct request_queue* q)
1275{ 1275{
1276 unsigned long flags; 1276 unsigned long flags;
1277 1277
diff --git a/drivers/acorn/block/mfmhd.c b/drivers/acorn/block/mfmhd.c
index d85520f78e6..74058db674d 100644
--- a/drivers/acorn/block/mfmhd.c
+++ b/drivers/acorn/block/mfmhd.c
@@ -924,7 +924,7 @@ static void mfm_request(void)
924 DBG("mfm_request: Dropping out bottom\n"); 924 DBG("mfm_request: Dropping out bottom\n");
925} 925}
926 926
927static void do_mfm_request(request_queue_t *q) 927static void do_mfm_request(struct request_queue *q)
928{ 928{
929 DBG("do_mfm_request: about to mfm_request\n"); 929 DBG("do_mfm_request: about to mfm_request\n");
930 mfm_request(); 930 mfm_request();
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 12ac0b511f7..e83647651b3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -768,7 +768,7 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
768 * Decrement max hw segments accordingly. 768 * Decrement max hw segments accordingly.
769 */ 769 */
770 if (dev->class == ATA_DEV_ATAPI) { 770 if (dev->class == ATA_DEV_ATAPI) {
771 request_queue_t *q = sdev->request_queue; 771 struct request_queue *q = sdev->request_queue;
772 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 772 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
773 } 773 }
774 774
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 6ce8b897e26..c9751b2b57e 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1422,7 +1422,7 @@ static void redo_fd_request(void)
1422 goto repeat; 1422 goto repeat;
1423} 1423}
1424 1424
1425static void do_fd_request(request_queue_t * q) 1425static void do_fd_request(struct request_queue * q)
1426{ 1426{
1427 redo_fd_request(); 1427 redo_fd_request();
1428} 1428}
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 1d846681794..ba07f762c4c 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -138,7 +138,7 @@ struct aoedev {
138 u16 maxbcnt; 138 u16 maxbcnt;
139 struct work_struct work;/* disk create work struct */ 139 struct work_struct work;/* disk create work struct */
140 struct gendisk *gd; 140 struct gendisk *gd;
141 request_queue_t blkq; 141 struct request_queue blkq;
142 struct hd_geometry geo; 142 struct hd_geometry geo;
143 sector_t ssize; 143 sector_t ssize;
144 struct timer_list timer; 144 struct timer_list timer;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 4f598270fa3..007faaf008e 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -125,7 +125,7 @@ aoeblk_release(struct inode *inode, struct file *filp)
125} 125}
126 126
127static int 127static int
128aoeblk_make_request(request_queue_t *q, struct bio *bio) 128aoeblk_make_request(struct request_queue *q, struct bio *bio)
129{ 129{
130 struct aoedev *d; 130 struct aoedev *d;
131 struct buf *buf; 131 struct buf *buf;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 14d6b949275..94268c75d04 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1466,7 +1466,7 @@ repeat:
1466} 1466}
1467 1467
1468 1468
1469void do_fd_request(request_queue_t * q) 1469void do_fd_request(struct request_queue * q)
1470{ 1470{
1471 unsigned long flags; 1471 unsigned long flags;
1472 1472
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a2d6612b80d..1be82d544dc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -139,7 +139,7 @@ static struct board_type products[] = {
 
 static ctlr_info_t *hba[MAX_CTLR];
 
-static void do_cciss_request(request_queue_t *q);
+static void do_cciss_request(struct request_queue *q);
 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
 static int cciss_open(struct inode *inode, struct file *filep);
 static int cciss_release(struct inode *inode, struct file *filep);
@@ -1584,7 +1584,7 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
 	 */
 	if (h->gendisk[0] != disk) {
 		if (disk) {
-			request_queue_t *q = disk->queue;
+			struct request_queue *q = disk->queue;
 			if (disk->flags & GENHD_FL_UP)
 				del_gendisk(disk);
 			if (q) {
@@ -2511,7 +2511,7 @@ after_error_processing:
 /*
  * Get a request and submit it to the controller.
  */
-static void do_cciss_request(request_queue_t *q)
+static void do_cciss_request(struct request_queue *q)
 {
 	ctlr_info_t *h = q->queuedata;
 	CommandList_struct *c;
@@ -3380,7 +3380,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	do {
 		drive_info_struct *drv = &(hba[i]->drv[j]);
 		struct gendisk *disk = hba[i]->gendisk[j];
-		request_queue_t *q;
+		struct request_queue *q;
 
 		/* Check if the disk was allocated already */
 		if (!disk){
@@ -3523,7 +3523,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
 	for (j = 0; j < CISS_MAX_LUN; j++) {
 		struct gendisk *disk = hba[i]->gendisk[j];
 		if (disk) {
-			request_queue_t *q = disk->queue;
+			struct request_queue *q = disk->queue;
 
 			if (disk->flags & GENHD_FL_UP)
 				del_gendisk(disk);
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b94cd1c3213..be4e3477d83 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -161,7 +161,7 @@ static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
 
-static void do_ida_request(request_queue_t *q);
+static void do_ida_request(struct request_queue *q);
 static void start_io(ctlr_info_t *h);
 
 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
@@ -391,7 +391,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
 /* pdev is NULL for eisa */
 static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	int j;
 
 	/*
@@ -886,7 +886,7 @@ static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
  * are in here (either via the dummy do_ida_request functions or by being
  * called from the interrupt handler
  */
-static void do_ida_request(request_queue_t *q)
+static void do_ida_request(struct request_queue *q)
 {
 	ctlr_info_t *h = q->queuedata;
 	cmdlist_t *c;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fe088045dd0..085b7794fb3 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -251,7 +251,7 @@ static int irqdma_allocated;
 
 static struct request *current_req;
 static struct request_queue *floppy_queue;
-static void do_fd_request(request_queue_t * q);
+static void do_fd_request(struct request_queue * q);
 
 #ifndef fd_get_dma_residue
 #define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
@@ -2981,7 +2981,7 @@ static void process_fd_request(void)
 	schedule_bh(redo_fd_request);
 }
 
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
 {
 	if (max_buffer_sectors == 0) {
 		printk("VFS: do_fd_request called on non-open device\n");
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
index 1634c2dd25e..5b79d072417 100644
--- a/drivers/block/lguest_blk.c
+++ b/drivers/block/lguest_blk.c
@@ -137,7 +137,7 @@ static void do_read(struct blockdev *bd, struct request *req)
 	lguest_send_dma(bd->phys_addr, &ping);
 }
 
-static void do_lgb_request(request_queue_t *q)
+static void do_lgb_request(struct request_queue *q)
 {
 	struct blockdev *bd;
 	struct request *req;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e425daa1eac..9f015fce413 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -529,7 +529,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
 	return bio;
 }
 
-static int loop_make_request(request_queue_t *q, struct bio *old_bio)
+static int loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct loop_device *lo = q->queuedata;
 	int rw = bio_rw(old_bio);
@@ -558,7 +558,7 @@ out:
 /*
  * kick off io on the underlying address space
  */
-static void loop_unplug(request_queue_t *q)
+static void loop_unplug(struct request_queue *q)
 {
 	struct loop_device *lo = q->queuedata;
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c1295102409..be92c658f06 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -100,7 +100,7 @@ static const char *nbdcmd_to_ascii(int cmd)
 static void nbd_end_request(struct request *req)
 {
 	int uptodate = (req->errors == 0) ? 1 : 0;
-	request_queue_t *q = req->q;
+	struct request_queue *q = req->q;
 	unsigned long flags;
 
 	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
@@ -410,7 +410,7 @@ static void nbd_clear_que(struct nbd_device *lo)
  *	{ printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
  */
 
-static void do_nbd_request(request_queue_t * q)
+static void do_nbd_request(struct request_queue * q)
 {
 	struct request *req;
 
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 1eeb8f2cde7..b8a994a2b01 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -183,7 +183,7 @@ static int pcd_packet(struct cdrom_device_info *cdi,
 static int pcd_detect(void);
 static void pcd_probe_capabilities(void);
 static void do_pcd_read_drq(void);
-static void do_pcd_request(request_queue_t * q);
+static void do_pcd_request(struct request_queue * q);
 static void do_pcd_read(void);
 
 struct pcd_unit {
@@ -713,7 +713,7 @@ static int pcd_detect(void)
 /* I/O request processing */
 static struct request_queue *pcd_queue;
 
-static void do_pcd_request(request_queue_t * q)
+static void do_pcd_request(struct request_queue * q)
 {
 	if (pcd_busy)
 		return;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 31e01488eb5..df819f8a95a 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -698,7 +698,7 @@ static enum action pd_identify(struct pd_unit *disk)
 
 /* end of io request engine */
 
-static void do_pd_request(request_queue_t * q)
+static void do_pd_request(struct request_queue * q)
 {
 	if (pd_req)
 		return;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 5826508f673..ceffa6034e2 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -202,7 +202,7 @@ module_param_array(drive3, int, NULL, 0);
 #define ATAPI_WRITE_10	0x2a
 
 static int pf_open(struct inode *inode, struct file *file);
-static void do_pf_request(request_queue_t * q);
+static void do_pf_request(struct request_queue * q);
 static int pf_ioctl(struct inode *inode, struct file *file,
 		    unsigned int cmd, unsigned long arg);
 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -760,7 +760,7 @@ static void pf_end_request(int uptodate)
 	}
 }
 
-static void do_pf_request(request_queue_t * q)
+static void do_pf_request(struct request_queue * q)
 {
 	if (pf_busy)
 		return;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 31be33e4f11..fadbfd880ba 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -752,7 +752,7 @@ static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio
  */
 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 {
-	request_queue_t *q = bdev_get_queue(pd->bdev);
+	struct request_queue *q = bdev_get_queue(pd->bdev);
 	struct request *rq;
 	int ret = 0;
 
@@ -979,7 +979,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
  * Special care is needed if the underlying block device has a small
  * max_phys_segments value.
  */
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
 	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
 		/*
@@ -2314,7 +2314,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
 {
 	int ret;
 	long lba;
-	request_queue_t *q;
+	struct request_queue *q;
 
 	/*
 	 * We need to re-open the cdrom device without O_NONBLOCK to be able
@@ -2477,7 +2477,7 @@ static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }
 
-static int pkt_make_request(request_queue_t *q, struct bio *bio)
+static int pkt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct pktcdvd_device *pd;
 	char b[BDEVNAME_SIZE];
@@ -2626,7 +2626,7 @@ end_io:
 
 
 
-static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
+static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
 {
 	struct pktcdvd_device *pd = q->queuedata;
 	sector_t zone = ZONE(bio->bi_sector, pd);
@@ -2647,7 +2647,7 @@ static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *b
 
 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
-	request_queue_t *q = pd->disk->queue;
+	struct request_queue *q = pd->disk->queue;
 
 	blk_queue_make_request(q, pkt_make_request);
 	blk_queue_hardsect_size(q, CD_FRAMESIZE);
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 688a4fb0dc9..3c796e23625 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -64,7 +64,7 @@ static void reset_ctrl(void);
 
 static int ps2esdi_geninit(void);
 
-static void do_ps2esdi_request(request_queue_t * q);
+static void do_ps2esdi_request(struct request_queue * q);
 
 static void ps2esdi_readwrite(int cmd, struct request *req);
 
@@ -473,7 +473,7 @@ static void __init ps2esdi_get_device_cfg(void)
 }
 
 /* strategy routine that handles most of the IO requests */
-static void do_ps2esdi_request(request_queue_t * q)
+static void do_ps2esdi_request(struct request_queue * q)
 {
 	struct request *req;
 	/* since, this routine is called with interrupts cleared - they
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 170fb33dba9..aa8b890c80d 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -190,7 +190,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
 }
 
 static void ps3disk_do_request(struct ps3_storage_device *dev,
-			       request_queue_t *q)
+			       struct request_queue *q)
 {
 	struct request *req;
 
@@ -211,7 +211,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
 	}
 }
 
-static void ps3disk_request(request_queue_t *q)
+static void ps3disk_request(struct request_queue *q)
 {
 	struct ps3_storage_device *dev = q->queuedata;
 	struct ps3disk_private *priv = dev->sbd.core.driver_data;
@@ -404,7 +404,7 @@ static int ps3disk_identify(struct ps3_storage_device *dev)
 	return 0;
 }
 
-static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
+static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
 {
 	struct ps3_storage_device *dev = q->queuedata;
 
@@ -414,7 +414,7 @@ static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
 	req->cmd_type = REQ_TYPE_FLUSH;
 }
 
-static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk,
+static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
 			       sector_t *sector)
 {
 	struct ps3_storage_device *dev = q->queuedata;
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index a1512da3241..65150b548f3 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -264,7 +264,7 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
  * 19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Added devfs support
  *
  */
-static int rd_make_request(request_queue_t *q, struct bio *bio)
+static int rd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct address_space * mapping = bdev->bd_inode->i_mapping;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index d50b8238115..4dff49256ac 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -444,7 +444,7 @@ out:
 	return err;
 }
 
-static void do_vdc_request(request_queue_t *q)
+static void do_vdc_request(struct request_queue *q)
 {
 	while (1) {
 		struct request *req = elv_next_request(q);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1a65979f1f0..b4e462f154e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -225,7 +225,7 @@ static unsigned short write_postamble[] = {
 static void swim3_select(struct floppy_state *fs, int sel);
 static void swim3_action(struct floppy_state *fs, int action);
 static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(request_queue_t * q);
+static void do_fd_request(struct request_queue * q);
 static void start_request(struct floppy_state *fs);
 static void set_timeout(struct floppy_state *fs, int nticks,
 			void (*proc)(unsigned long));
@@ -290,7 +290,7 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
 	return (stat & DATA) == 0;
 }
 
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
 {
 	int i;
 	for(i=0;i<floppy_count;i++)
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 949ae93499e..402209fec59 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -278,7 +278,7 @@ struct carm_host {
 	unsigned int			state;
 	u32				fw_ver;
 
-	request_queue_t			*oob_q;
+	struct request_queue		*oob_q;
 	unsigned int			n_oob;
 
 	unsigned int			hw_sg_used;
@@ -287,7 +287,7 @@ struct carm_host {
 
 	unsigned int			wait_q_prod;
 	unsigned int			wait_q_cons;
-	request_queue_t			*wait_q[CARM_MAX_WAIT_Q];
+	struct request_queue		*wait_q[CARM_MAX_WAIT_Q];
 
 	unsigned int			n_msgs;
 	u64				msg_alloc;
@@ -756,7 +756,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
 	assert(rc == 0);
 }
 
-static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
+static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
 {
 	unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
 
@@ -768,7 +768,7 @@ static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
 	BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
 }
 
-static inline request_queue_t *carm_pop_q(struct carm_host *host)
+static inline struct request_queue *carm_pop_q(struct carm_host *host)
 {
 	unsigned int idx;
 
@@ -783,7 +783,7 @@ static inline request_queue_t *carm_pop_q(struct carm_host *host)
 
 static inline void carm_round_robin(struct carm_host *host)
 {
-	request_queue_t *q = carm_pop_q(host);
+	struct request_queue *q = carm_pop_q(host);
 	if (q) {
 		blk_start_queue(q);
 		VPRINTK("STARTED QUEUE %p\n", q);
@@ -802,7 +802,7 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
 	}
 }
 
-static void carm_oob_rq_fn(request_queue_t *q)
+static void carm_oob_rq_fn(struct request_queue *q)
 {
 	struct carm_host *host = q->queuedata;
 	struct carm_request *crq;
@@ -833,7 +833,7 @@ static void carm_oob_rq_fn(request_queue_t *q)
 	}
 }
 
-static void carm_rq_fn(request_queue_t *q)
+static void carm_rq_fn(struct request_queue *q)
 {
 	struct carm_port *port = q->queuedata;
 	struct carm_host *host = port->host;
@@ -1494,7 +1494,7 @@ static int carm_init_disks(struct carm_host *host)
 
 	for (i = 0; i < CARM_MAX_PORTS; i++) {
 		struct gendisk *disk;
-		request_queue_t *q;
+		struct request_queue *q;
 		struct carm_port *port;
 
 		port = &host->port[i];
@@ -1538,7 +1538,7 @@ static void carm_free_disks(struct carm_host *host)
 	for (i = 0; i < CARM_MAX_PORTS; i++) {
 		struct gendisk *disk = host->port[i].disk;
 		if (disk) {
-			request_queue_t *q = disk->queue;
+			struct request_queue *q = disk->queue;
 
 			if (disk->flags & GENHD_FL_UP)
 				del_gendisk(disk);
@@ -1571,7 +1571,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct carm_host *host;
 	unsigned int pci_dac;
 	int rc;
-	request_queue_t *q;
+	struct request_queue *q;
 	unsigned int i;
 
 	if (!printed_version++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 8b13d7d2cb6..c57dd2b3a0c 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -503,7 +503,7 @@ static void ub_cleanup(struct ub_dev *sc)
 {
 	struct list_head *p;
 	struct ub_lun *lun;
-	request_queue_t *q;
+	struct request_queue *q;
 
 	while (!list_empty(&sc->luns)) {
 		p = sc->luns.next;
@@ -619,7 +619,7 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
  * The request function is our main entry point
  */
 
-static void ub_request_fn(request_queue_t *q)
+static void ub_request_fn(struct request_queue *q)
 {
 	struct ub_lun *lun = q->queuedata;
 	struct request *rq;
@@ -2273,7 +2273,7 @@ err_core:
 static int ub_probe_lun(struct ub_dev *sc, int lnum)
 {
 	struct ub_lun *lun;
-	request_queue_t *q;
+	struct request_queue *q;
 	struct gendisk *disk;
 	int rc;
 
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index dec74bd2349..6b7c02d6360 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -114,7 +114,7 @@ struct cardinfo {
 	 */
 	struct bio	*bio, *currentbio, **biotail;
 
-	request_queue_t *queue;
+	struct request_queue *queue;
 
 	struct mm_page {
 		dma_addr_t		page_dma;
@@ -357,7 +357,7 @@ static inline void reset_page(struct mm_page *page)
 	page->biotail = & page->bio;
 }
 
-static void mm_unplug_device(request_queue_t *q)
+static void mm_unplug_device(struct request_queue *q)
 {
 	struct cardinfo *card = q->queuedata;
 	unsigned long flags;
@@ -541,7 +541,7 @@ static void process_page(unsigned long data)
 -- mm_make_request
 -----------------------------------------------------------------------------------
 */
-static int mm_make_request(request_queue_t *q, struct bio *bio)
+static int mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index dae39911a11..85916e2665d 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -400,7 +400,7 @@ error_ret:
 /*
  * This is the external request processing routine
  */
-static void do_viodasd_request(request_queue_t *q)
+static void do_viodasd_request(struct request_queue *q)
 {
 	struct request *req;
 
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0d97b7eb818..624d30f7da3 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -298,7 +298,7 @@ static u_char __init xd_detect (u_char *controller, unsigned int *address)
 }
 
 /* do_xd_request: handle an incoming request */
-static void do_xd_request (request_queue_t * q)
+static void do_xd_request (struct request_queue * q)
 {
 	struct request *req;
 
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
index 82e090fea95..cffd44a2038 100644
--- a/drivers/block/xd.h
+++ b/drivers/block/xd.h
@@ -104,7 +104,7 @@ static int xd_manual_geo_init (char *command);
 static u_char xd_detect (u_char *controller, unsigned int *address);
 static u_char xd_initdrives (void (*init_drive)(u_char drive));
 
-static void do_xd_request (request_queue_t * q);
+static void do_xd_request (struct request_queue * q);
 static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
 static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
 static void xd_recalibrate (u_char drive);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6746c29181f..964e51634f2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -241,7 +241,7 @@ static inline void flush_requests(struct blkfront_info *info)
  * do_blkif_request
  *  read a block; request is in a request queue
  */
-static void do_blkif_request(request_queue_t *rq)
+static void do_blkif_request(struct request_queue *rq)
 {
 	struct blkfront_info *info = NULL;
 	struct request *req;
@@ -287,7 +287,7 @@ wait:
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 {
-	request_queue_t *rq;
+	struct request_queue *rq;
 
 	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
 	if (rq == NULL)
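
The xlvbd_init_blk_queue() hunk above also shows the allocation side of
the API: blk_init_queue() now hands the queue back as a plain struct
request_queue * too. A stripped-down sketch of that pattern (the names
below are hypothetical, not blkfront code):

	static DEFINE_SPINLOCK(example_lock);

	static void example_request_fn(struct request_queue *q);

	static struct request_queue *example_alloc_queue(void)
	{
		struct request_queue *q;

		q = blk_init_queue(example_request_fn, &example_lock);
		if (q == NULL)
			return NULL;	/* allocation failed */
		return q;
	}
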
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 732ec63b6e9..cb27e8863d7 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -458,7 +458,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
 }
 
 /* Get the next read/write request; ending requests that we don't handle */
-struct request *ace_get_next_request(request_queue_t * q)
+struct request *ace_get_next_request(struct request_queue * q)
 {
 	struct request *req;
 
@@ -825,7 +825,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 /* ---------------------------------------------------------------------
  * Block ops
  */
-static void ace_request(request_queue_t * q)
+static void ace_request(struct request_queue * q)
 {
 	struct request *req;
 	struct ace_device *ace;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index e40fa98842e..2d5853cbd4b 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -67,7 +67,7 @@ static DEFINE_SPINLOCK(z2ram_lock);
 static struct block_device_operations z2_fops;
 static struct gendisk *z2ram_gendisk;
 
-static void do_z2_request(request_queue_t *q)
+static void do_z2_request(struct request_queue *q)
 {
 	struct request *req;
 	while ((req = elv_next_request(q)) != NULL) {
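
do_z2_request() above is the typical shape of a simple request function
after the conversion. Schematically, the whole loop now reads roughly
like this (a sketch only; using end_request() as the completion call is
an assumption based on contemporaneous drivers, and the actual data
transfer is elided):

	static void do_example_request(struct request_queue *q)
	{
		struct request *req;

		while ((req = elv_next_request(q)) != NULL) {
			/* transfer the data for req here ... */
			end_request(req, 1);	/* assumed-era completion helper */
		}
	}
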
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 499019bf8f4..67ee3d4b287 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2094,7 +2094,7 @@ out:
 static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 			       int lba, int nframes)
 {
-	request_queue_t *q = cdi->disk->queue;
+	struct request_queue *q = cdi->disk->queue;
 	struct request *rq;
 	struct bio *bio;
 	unsigned int len;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 44cd7b2ddf0..e51550db157 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -398,7 +398,7 @@ static void viocd_end_request(struct request *req, int uptodate)
 
 static int rwreq;
 
-static void do_viocd_request(request_queue_t *q)
+static void do_viocd_request(struct request_queue *q)
 {
 	struct request *req;
 
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1486eb212cc..ca843522f91 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -3071,7 +3071,7 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
 /*
  * standard prep_rq_fn that builds 10 byte cmds
  */
-static int ide_cdrom_prep_fs(request_queue_t *q, struct request *rq)
+static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
 	int hard_sect = queue_hardsect_size(q);
 	long block = (long)rq->hard_sector / (hard_sect >> 9);
@@ -3137,7 +3137,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
 	return BLKPREP_OK;
 }
 
-static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq)
+static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
 {
 	if (blk_fs_request(rq))
 		return ide_cdrom_prep_fs(q, rq);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index b1304a7f3e0..5ce4216f72a 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -679,7 +679,7 @@ static ide_proc_entry_t idedisk_proc[] = {
 };
 #endif	/* CONFIG_IDE_PROC_FS */
 
-static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
+static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
 {
 	ide_drive_t *drive = q->queuedata;
 
@@ -697,7 +697,7 @@ static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 	rq->buffer = rq->cmd;
 }
 
-static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
 			       sector_t *error_sector)
 {
 	ide_drive_t *drive = q->queuedata;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 484c50e7144..aa9f5f0b1e6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1327,7 +1327,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 /*
  * Passes the stuff to ide_do_request
  */
-void do_ide_request(request_queue_t *q)
+void do_ide_request(struct request_queue *q)
 {
 	ide_drive_t *drive = q->queuedata;
 
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 5a4c5ea12f8..3a2a9a338fd 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -945,7 +945,7 @@ static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
  */
 static int ide_init_queue(ide_drive_t *drive)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	ide_hwif_t *hwif = HWIF(drive);
 	int max_sectors = 256;
 	int max_sg_entries = PRD_ENTRIES;
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 8f2db8dd35f..8e05d88e81b 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -652,7 +652,7 @@ repeat:
 	}
 }
 
-static void do_hd_request (request_queue_t * q)
+static void do_hd_request (struct request_queue * q)
 {
 	disable_irq(HD_IRQ);
 	hd_request();
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2fc199b0016..2bcde5798b5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -526,7 +526,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 
 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	struct io_restrictions *rs = &ti->limits;
 
 	/*
@@ -979,7 +979,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 	devices = dm_table_get_devices(t);
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
 	}
 
@@ -992,7 +992,7 @@ void dm_table_unplug_all(struct dm_table *t)
 
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 
 		if (q->unplug_fn)
 			q->unplug_fn(q);
@@ -1011,7 +1011,7 @@ int dm_table_flush_all(struct dm_table *t)
 
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 		int err;
 
 		if (!q->issue_flush_fn)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 846614e676c..141ff9fa296 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -80,7 +80,7 @@ struct mapped_device {
 
 	unsigned long flags;
 
-	request_queue_t *queue;
+	struct request_queue *queue;
 	struct gendisk *disk;
 	char name[16];
 
@@ -792,7 +792,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int dm_request(request_queue_t *q, struct bio *bio)
+static int dm_request(struct request_queue *q, struct bio *bio)
 {
 	int r;
 	int rw = bio_data_dir(bio);
@@ -844,7 +844,7 @@ static int dm_request(request_queue_t *q, struct bio *bio)
 	return 0;
 }
 
-static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
 			sector_t *error_sector)
 {
 	struct mapped_device *md = q->queuedata;
@@ -859,7 +859,7 @@ static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
 	return ret;
 }
 
-static void dm_unplug_all(request_queue_t *q)
+static void dm_unplug_all(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_table(md);
@@ -1110,7 +1110,7 @@ static void __set_size(struct mapped_device *md, sector_t size)
 
 static int __bind(struct mapped_device *md, struct dm_table *t)
 {
-	request_queue_t *q = md->queue;
+	struct request_queue *q = md->queue;
 	sector_t size;
 
 	size = dm_table_get_size(t);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4ebd0f2a75e..cb059cf14c2 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -167,7 +167,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
 	conf->nfaults = n+1;
 }
 
-static int make_request(request_queue_t *q, struct bio *bio)
+static int make_request(struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = (conf_t*)mddev->private;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 19274108319..17f795c3e0a 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -55,7 +55,7 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
  *
  * Return amount of bytes we can take at this offset
  */
-static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *dev0;
@@ -79,20 +79,20 @@ static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio
 	return maxsectors << 9;
 }
 
-static void linear_unplug(request_queue_t *q)
+static void linear_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	linear_conf_t *conf = mddev_to_conf(mddev);
 	int i;
 
 	for (i=0; i < mddev->raid_disks; i++) {
-		request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
+		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
 		if (r_queue->unplug_fn)
 			r_queue->unplug_fn(r_queue);
 	}
 }
 
-static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
 			      sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -101,7 +101,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
 
 	for (i=0; i < mddev->raid_disks && ret == 0; i++) {
 		struct block_device *bdev = conf->disks[i].rdev->bdev;
-		request_queue_t *r_queue = bdev_get_queue(bdev);
+		struct request_queue *r_queue = bdev_get_queue(bdev);
 
 		if (!r_queue->issue_flush_fn)
 			ret = -EOPNOTSUPP;
@@ -118,7 +118,7 @@ static int linear_congested(void *data, int bits)
 	int i, ret = 0;
 
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
-		request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
 		ret |= bdi_congested(&q->backing_dev_info, bits);
 	}
 	return ret;
@@ -330,7 +330,7 @@ static int linear_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int linear_make_request (request_queue_t *q, struct bio *bio)
+static int linear_make_request (struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
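
The md hunks here and below repeat one stacking idiom: look up each
member device's queue with bdev_get_queue() and forward the operation
to it. Pulled out of context it looks like this (a generic sketch; the
members array is a hypothetical stand-in for the per-personality conf
structures):

	static void example_stacked_unplug(struct block_device **members, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			struct request_queue *r_queue = bdev_get_queue(members[i]);

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);
		}
	}
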
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 65ddc887dfd..f883b7e37f3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -211,7 +211,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 	)
 
 
-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
 {
 	bio_io_error(bio, bio->bi_size);
 	return 0;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 14da37fee37..1e2af43a73b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,7 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)
 		    && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -140,13 +140,13 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void multipath_unplug(request_queue_t *q)
+static void multipath_unplug(struct request_queue *q)
 {
 	unplug_slaves(q->queuedata);
 }
 
 
-static int multipath_make_request (request_queue_t *q, struct bio * bio)
+static int multipath_make_request (struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	multipath_conf_t *conf = mddev_to_conf(mddev);
@@ -199,7 +199,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
 	seq_printf (seq, "]");
 }
 
-static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
 				 sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -211,7 +211,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -238,7 +238,7 @@ static int multipath_congested(void *data, int bits)
 	for (i = 0; i < mddev->raid_disks ; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 			ret |= bdi_congested(&q->backing_dev_info, bits);
 			/* Just like multipath_map, we just check the
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2c404f73a37..b8216bc6db4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,7 +25,7 @@
 #define MD_DRIVER
 #define MD_PERSONALITY
 
-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
@@ -33,14 +33,14 @@ static void raid0_unplug(request_queue_t *q)
 	int i;
 
 	for (i=0; i<mddev->raid_disks; i++) {
-		request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
 		if (r_queue->unplug_fn)
 			r_queue->unplug_fn(r_queue);
 	}
 }
 
-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -50,7 +50,7 @@ static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
 
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
 		struct block_device *bdev = devlist[i]->bdev;
-		request_queue_t *r_queue = bdev_get_queue(bdev);
+		struct request_queue *r_queue = bdev_get_queue(bdev);
 
 		if (!r_queue->issue_flush_fn)
 			ret = -EOPNOTSUPP;
@@ -68,7 +68,7 @@ static int raid0_congested(void *data, int bits)
 	int i, ret = 0;
 
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
-		request_queue_t *q = bdev_get_queue(devlist[i]->bdev);
+		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 
 		ret |= bdi_congested(&q->backing_dev_info, bits);
 	}
@@ -268,7 +268,7 @@ static int create_strip_zones (mddev_t *mddev)
  *
  * Return amount of bytes we can accept at this offset
  */
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -408,7 +408,7 @@ static int raid0_stop (mddev_t *mddev)
 	return 0;
 }
 
-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request (struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00c78b77b13..650991bddd8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -552,7 +552,7 @@ static void unplug_slaves(mddev_t *mddev)
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -567,7 +567,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_unlock();
 }
 
-static void raid1_unplug(request_queue_t *q)
+static void raid1_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 
@@ -575,7 +575,7 @@ static void raid1_unplug(request_queue_t *q)
 	md_wakeup_thread(mddev->thread);
 }
 
-static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
@@ -587,7 +587,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);
 
 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
@@ -615,7 +615,7 @@ static int raid1_congested(void *data, int bits)
 	for (i = 0; i < mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 			/* Note the '|| 1' - when read_balance prefers
 			 * non-congested targets, it can be removed
@@ -765,7 +765,7 @@ do_sync_io:
 	return NULL;
 }
 
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a95ada1cfac..f730a144baf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -453,7 +453,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
453 * If near_copies == raid_disk, there are no striping issues, 453 * If near_copies == raid_disk, there are no striping issues,
454 * but in that case, the function isn't called at all. 454 * but in that case, the function isn't called at all.
455 */ 455 */
456static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio, 456static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
457 struct bio_vec *bio_vec) 457 struct bio_vec *bio_vec)
458{ 458{
459 mddev_t *mddev = q->queuedata; 459 mddev_t *mddev = q->queuedata;
@@ -595,7 +595,7 @@ static void unplug_slaves(mddev_t *mddev)
595 for (i=0; i<mddev->raid_disks; i++) { 595 for (i=0; i<mddev->raid_disks; i++) {
596 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 596 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
597 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 597 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
598 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 598 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
599 599
600 atomic_inc(&rdev->nr_pending); 600 atomic_inc(&rdev->nr_pending);
601 rcu_read_unlock(); 601 rcu_read_unlock();
@@ -610,7 +610,7 @@ static void unplug_slaves(mddev_t *mddev)
610 rcu_read_unlock(); 610 rcu_read_unlock();
611} 611}
612 612
613static void raid10_unplug(request_queue_t *q) 613static void raid10_unplug(struct request_queue *q)
614{ 614{
615 mddev_t *mddev = q->queuedata; 615 mddev_t *mddev = q->queuedata;
616 616
@@ -618,7 +618,7 @@ static void raid10_unplug(request_queue_t *q)
618 md_wakeup_thread(mddev->thread); 618 md_wakeup_thread(mddev->thread);
619} 619}
620 620
621static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, 621static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
622 sector_t *error_sector) 622 sector_t *error_sector)
623{ 623{
624 mddev_t *mddev = q->queuedata; 624 mddev_t *mddev = q->queuedata;
@@ -630,7 +630,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
630 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 630 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
631 if (rdev && !test_bit(Faulty, &rdev->flags)) { 631 if (rdev && !test_bit(Faulty, &rdev->flags)) {
632 struct block_device *bdev = rdev->bdev; 632 struct block_device *bdev = rdev->bdev;
633 request_queue_t *r_queue = bdev_get_queue(bdev); 633 struct request_queue *r_queue = bdev_get_queue(bdev);
634 634
635 if (!r_queue->issue_flush_fn) 635 if (!r_queue->issue_flush_fn)
636 ret = -EOPNOTSUPP; 636 ret = -EOPNOTSUPP;
@@ -658,7 +658,7 @@ static int raid10_congested(void *data, int bits)
658 for (i = 0; i < mddev->raid_disks && ret == 0; i++) { 658 for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
659 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 659 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
660 if (rdev && !test_bit(Faulty, &rdev->flags)) { 660 if (rdev && !test_bit(Faulty, &rdev->flags)) {
661 request_queue_t *q = bdev_get_queue(rdev->bdev); 661 struct request_queue *q = bdev_get_queue(rdev->bdev);
662 662
663 ret |= bdi_congested(&q->backing_dev_info, bits); 663 ret |= bdi_congested(&q->backing_dev_info, bits);
664 } 664 }
@@ -772,7 +772,7 @@ static void unfreeze_array(conf_t *conf)
772 spin_unlock_irq(&conf->resync_lock); 772 spin_unlock_irq(&conf->resync_lock);
773} 773}
774 774
775static int make_request(request_queue_t *q, struct bio * bio) 775static int make_request(struct request_queue *q, struct bio * bio)
776{ 776{
777 mddev_t *mddev = q->queuedata; 777 mddev_t *mddev = q->queuedata;
778 conf_t *conf = mddev_to_conf(mddev); 778 conf_t *conf = mddev_to_conf(mddev);
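[Editor's illustration] raid10_mergeable_bvec above is a merge_bvec_fn; it answers "how many bytes of this bio_vec may join this bio at its current sector". A self-contained sketch of the same chunk-boundary arithmetic, with a hard-coded hypothetical chunk size standing in for mddev->chunk_size:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

static int example_mergeable_bvec(struct request_queue *q, struct bio *bio,
				  struct bio_vec *biovec)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = 128;	/* hypothetical 64KB chunks */
	unsigned int bio_sectors = bio->bi_size >> 9;
	int max;

	/* bytes left before the bio would cross a chunk boundary */
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0;
	/* an empty bio must always be allowed to take at least one bvec */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	return max;
}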
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d90ee145eff..2aff4be35dc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -289,7 +289,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
289} 289}
290 290
291static void unplug_slaves(mddev_t *mddev); 291static void unplug_slaves(mddev_t *mddev);
292static void raid5_unplug_device(request_queue_t *q); 292static void raid5_unplug_device(struct request_queue *q);
293 293
294static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, 294static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
295 int pd_idx, int noblock) 295 int pd_idx, int noblock)
@@ -3182,7 +3182,7 @@ static void unplug_slaves(mddev_t *mddev)
3182 for (i=0; i<mddev->raid_disks; i++) { 3182 for (i=0; i<mddev->raid_disks; i++) {
3183 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3183 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3184 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3184 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3185 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 3185 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
3186 3186
3187 atomic_inc(&rdev->nr_pending); 3187 atomic_inc(&rdev->nr_pending);
3188 rcu_read_unlock(); 3188 rcu_read_unlock();
@@ -3197,7 +3197,7 @@ static void unplug_slaves(mddev_t *mddev)
3197 rcu_read_unlock(); 3197 rcu_read_unlock();
3198} 3198}
3199 3199
3200static void raid5_unplug_device(request_queue_t *q) 3200static void raid5_unplug_device(struct request_queue *q)
3201{ 3201{
3202 mddev_t *mddev = q->queuedata; 3202 mddev_t *mddev = q->queuedata;
3203 raid5_conf_t *conf = mddev_to_conf(mddev); 3203 raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3216,7 @@ static void raid5_unplug_device(request_queue_t *q)
3216 unplug_slaves(mddev); 3216 unplug_slaves(mddev);
3217} 3217}
3218 3218
3219static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, 3219static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
3220 sector_t *error_sector) 3220 sector_t *error_sector)
3221{ 3221{
3222 mddev_t *mddev = q->queuedata; 3222 mddev_t *mddev = q->queuedata;
@@ -3228,7 +3228,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
3228 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3228 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3229 if (rdev && !test_bit(Faulty, &rdev->flags)) { 3229 if (rdev && !test_bit(Faulty, &rdev->flags)) {
3230 struct block_device *bdev = rdev->bdev; 3230 struct block_device *bdev = rdev->bdev;
3231 request_queue_t *r_queue = bdev_get_queue(bdev); 3231 struct request_queue *r_queue = bdev_get_queue(bdev);
3232 3232
3233 if (!r_queue->issue_flush_fn) 3233 if (!r_queue->issue_flush_fn)
3234 ret = -EOPNOTSUPP; 3234 ret = -EOPNOTSUPP;
@@ -3267,7 +3267,7 @@ static int raid5_congested(void *data, int bits)
3267/* We want read requests to align with chunks where possible, 3267/* We want read requests to align with chunks where possible,
3268 * but write requests don't need to. 3268 * but write requests don't need to.
3269 */ 3269 */
3270static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) 3270static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
3271{ 3271{
3272 mddev_t *mddev = q->queuedata; 3272 mddev_t *mddev = q->queuedata;
3273 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3273 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -3377,7 +3377,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
3377 3377
3378static int bio_fits_rdev(struct bio *bi) 3378static int bio_fits_rdev(struct bio *bi)
3379{ 3379{
3380 request_queue_t *q = bdev_get_queue(bi->bi_bdev); 3380 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3381 3381
3382 if ((bi->bi_size>>9) > q->max_sectors) 3382 if ((bi->bi_size>>9) > q->max_sectors)
3383 return 0; 3383 return 0;
@@ -3396,7 +3396,7 @@ static int bio_fits_rdev(struct bio *bi)
3396} 3396}
3397 3397
3398 3398
3399static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio) 3399static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3400{ 3400{
3401 mddev_t *mddev = q->queuedata; 3401 mddev_t *mddev = q->queuedata;
3402 raid5_conf_t *conf = mddev_to_conf(mddev); 3402 raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3466,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
3466} 3466}
3467 3467
3468 3468
3469static int make_request(request_queue_t *q, struct bio * bi) 3469static int make_request(struct request_queue *q, struct bio * bi)
3470{ 3470{
3471 mddev_t *mddev = q->queuedata; 3471 mddev_t *mddev = q->queuedata;
3472 raid5_conf_t *conf = mddev_to_conf(mddev); 3472 raid5_conf_t *conf = mddev_to_conf(mddev);
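[Editor's illustration] bio_fits_rdev, converted above, is essentially a limits check against the member disk's queue. A condensed restatement (the real function also consults the queue's merge_bvec_fn, omitted here):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int bio_fits_queue(struct bio *bi, struct request_queue *q)
{
	if ((bi->bi_size >> 9) > q->max_sectors)
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > q->max_phys_segments ||
	    bi->bi_hw_segments > q->max_hw_segments)
		return 0;
	return 1;
}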
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 988c8ce47f5..5e1c99f83ab 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -159,7 +159,7 @@ static int i2o_block_device_flush(struct i2o_device *dev)
159 * Returns 0 on success or negative error code on failure. 159 * Returns 0 on success or negative error code on failure.
160 */ 160 */
161 161
162static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk, 162static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
163 sector_t * error_sector) 163 sector_t * error_sector)
164{ 164{
165 struct i2o_block_device *i2o_blk_dev = queue->queuedata; 165 struct i2o_block_device *i2o_blk_dev = queue->queuedata;
@@ -445,7 +445,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
445{ 445{
446 struct i2o_block_request *ireq = req->special; 446 struct i2o_block_request *ireq = req->special;
447 struct i2o_block_device *dev = ireq->i2o_blk_dev; 447 struct i2o_block_device *dev = ireq->i2o_blk_dev;
448 request_queue_t *q = req->q; 448 struct request_queue *q = req->q;
449 unsigned long flags; 449 unsigned long flags;
450 450
451 if (end_that_request_chunk(req, uptodate, nr_bytes)) { 451 if (end_that_request_chunk(req, uptodate, nr_bytes)) {
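[Editor's illustration] i2o_block_issue_flush above implements the issue_flush_fn hook whose typedef is renamed later in this patch (include/linux/blkdev.h). The general shape of such a hook; example_dev and example_flush_cache are hypothetical:

#include <linux/blkdev.h>
#include <linux/genhd.h>

struct example_dev;			/* hypothetical */
static int example_flush_cache(struct example_dev *dev, sector_t *error_sector);

static int example_issue_flush(struct request_queue *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct example_dev *dev = q->queuedata;

	if (!dev)
		return -ENXIO;
	/* 0 on success, -EOPNOTSUPP if there is no cache to flush */
	return example_flush_cache(dev, error_sector);
}

A driver registers the hook with blk_queue_issue_flush_fn(q, example_issue_flush), also renamed in this patch.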
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index b53dac8d1b6..e02eac87636 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -83,7 +83,7 @@ static int mmc_queue_thread(void *d)
83 * on any queue on this host, and attempt to issue it. This may 83 * on any queue on this host, and attempt to issue it. This may
84 * not be the queue we were asked to process. 84 * not be the queue we were asked to process.
85 */ 85 */
86static void mmc_request(request_queue_t *q) 86static void mmc_request(struct request_queue *q)
87{ 87{
88 struct mmc_queue *mq = q->queuedata; 88 struct mmc_queue *mq = q->queuedata;
89 struct request *req; 89 struct request *req;
@@ -211,7 +211,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
211 211
212void mmc_cleanup_queue(struct mmc_queue *mq) 212void mmc_cleanup_queue(struct mmc_queue *mq)
213{ 213{
214 request_queue_t *q = mq->queue; 214 struct request_queue *q = mq->queue;
215 unsigned long flags; 215 unsigned long flags;
216 216
217 /* Mark that we should start throwing out stragglers */ 217 /* Mark that we should start throwing out stragglers */
@@ -252,7 +252,7 @@ EXPORT_SYMBOL(mmc_cleanup_queue);
252 */ 252 */
253void mmc_queue_suspend(struct mmc_queue *mq) 253void mmc_queue_suspend(struct mmc_queue *mq)
254{ 254{
255 request_queue_t *q = mq->queue; 255 struct request_queue *q = mq->queue;
256 unsigned long flags; 256 unsigned long flags;
257 257
258 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) { 258 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
@@ -272,7 +272,7 @@ void mmc_queue_suspend(struct mmc_queue *mq)
272 */ 272 */
273void mmc_queue_resume(struct mmc_queue *mq) 273void mmc_queue_resume(struct mmc_queue *mq)
274{ 274{
275 request_queue_t *q = mq->queue; 275 struct request_queue *q = mq->queue;
276 unsigned long flags; 276 unsigned long flags;
277 277
278 if (mq->flags & MMC_QUEUE_SUSPENDED) { 278 if (mq->flags & MMC_QUEUE_SUSPENDED) {
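[Editor's illustration] mmc_request above is a classic request_fn. Under the new spelling, the canonical consume loop looks like this; example_transfer is hypothetical, while elv_next_request and end_request are the era's real APIs:

#include <linux/blkdev.h>

static int example_transfer(struct request *req);	/* hypothetical: 1 = success */

static void example_request(struct request_queue *q)
{
	struct request *req;

	/* A request_fn is invoked with q->queue_lock held; drain until empty. */
	while ((req = elv_next_request(q)) != NULL)
		end_request(req, example_transfer(req));
}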
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bfeca57098f..e6bfce690ca 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1187,7 +1187,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1187static void 1187static void
1188__dasd_process_blk_queue(struct dasd_device * device) 1188__dasd_process_blk_queue(struct dasd_device * device)
1189{ 1189{
1190 request_queue_t *queue; 1190 struct request_queue *queue;
1191 struct request *req; 1191 struct request *req;
1192 struct dasd_ccw_req *cqr; 1192 struct dasd_ccw_req *cqr;
1193 int nr_queued; 1193 int nr_queued;
@@ -1740,7 +1740,7 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
1740 * Dasd request queue function. Called from ll_rw_blk.c 1740 * Dasd request queue function. Called from ll_rw_blk.c
1741 */ 1741 */
1742static void 1742static void
1743do_dasd_request(request_queue_t * queue) 1743do_dasd_request(struct request_queue * queue)
1744{ 1744{
1745 struct dasd_device *device; 1745 struct dasd_device *device;
1746 1746
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 241294cba41..aeda5268244 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -293,7 +293,7 @@ struct dasd_uid {
293struct dasd_device { 293struct dasd_device {
294 /* Block device stuff. */ 294 /* Block device stuff. */
295 struct gendisk *gdp; 295 struct gendisk *gdp;
296 request_queue_t *request_queue; 296 struct request_queue *request_queue;
297 spinlock_t request_queue_lock; 297 spinlock_t request_queue_lock;
298 struct block_device *bdev; 298 struct block_device *bdev;
299 unsigned int devindex; 299 unsigned int devindex;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 35765f6a86e..4d8798bacf9 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -621,7 +621,7 @@ out:
621} 621}
622 622
623static int 623static int
624dcssblk_make_request(request_queue_t *q, struct bio *bio) 624dcssblk_make_request(struct request_queue *q, struct bio *bio)
625{ 625{
626 struct dcssblk_dev_info *dev_info; 626 struct dcssblk_dev_info *dev_info;
627 struct bio_vec *bvec; 627 struct bio_vec *bvec;
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index a04d9120cef..354a060e5be 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -191,7 +191,7 @@ static unsigned long __init xpram_highest_page_index(void)
191/* 191/*
192 * Block device make request function. 192 * Block device make request function.
193 */ 193 */
194static int xpram_make_request(request_queue_t *q, struct bio *bio) 194static int xpram_make_request(struct request_queue *q, struct bio *bio)
195{ 195{
196 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 196 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
197 struct bio_vec *bvec; 197 struct bio_vec *bvec;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 3b52f5c1dbe..dddf8d62c15 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -188,7 +188,7 @@ struct tape_blk_data
188{ 188{
189 struct tape_device * device; 189 struct tape_device * device;
190 /* Block device request queue. */ 190 /* Block device request queue. */
191 request_queue_t * request_queue; 191 struct request_queue * request_queue;
192 spinlock_t request_queue_lock; 192 spinlock_t request_queue_lock;
193 193
194 /* Task to move entries from block request to CCS request queue. */ 194 /* Task to move entries from block request to CCS request queue. */
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index dd0ecaed592..eeb92e2ed0c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -147,7 +147,7 @@ static void
147tapeblock_requeue(struct work_struct *work) { 147tapeblock_requeue(struct work_struct *work) {
148 struct tape_blk_data * blkdat; 148 struct tape_blk_data * blkdat;
149 struct tape_device * device; 149 struct tape_device * device;
150 request_queue_t * queue; 150 struct request_queue * queue;
151 int nr_queued; 151 int nr_queued;
152 struct request * req; 152 struct request * req;
153 struct list_head * l; 153 struct list_head * l;
@@ -194,7 +194,7 @@ tapeblock_requeue(struct work_struct *work) {
194 * Tape request queue function. Called from ll_rw_blk.c 194 * Tape request queue function. Called from ll_rw_blk.c
195 */ 195 */
196static void 196static void
197tapeblock_request_fn(request_queue_t *queue) 197tapeblock_request_fn(struct request_queue *queue)
198{ 198{
199 struct tape_device *device; 199 struct tape_device *device;
200 200
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 5157a2abc58..4b7079fdc10 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -185,7 +185,7 @@ static void jsfd_read(char *buf, unsigned long p, size_t togo) {
185 } 185 }
186} 186}
187 187
188static void jsfd_do_request(request_queue_t *q) 188static void jsfd_do_request(struct request_queue *q)
189{ 189{
190 struct request *req; 190 struct request *req;
191 191
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index da63c544919..21c075d44db 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -654,7 +654,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
654static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, 654static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
655 int bytes, int requeue) 655 int bytes, int requeue)
656{ 656{
657 request_queue_t *q = cmd->device->request_queue; 657 struct request_queue *q = cmd->device->request_queue;
658 struct request *req = cmd->request; 658 struct request *req = cmd->request;
659 unsigned long flags; 659 unsigned long flags;
660 660
@@ -818,7 +818,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
818{ 818{
819 int result = cmd->result; 819 int result = cmd->result;
820 int this_count = cmd->request_bufflen; 820 int this_count = cmd->request_bufflen;
821 request_queue_t *q = cmd->device->request_queue; 821 struct request_queue *q = cmd->device->request_queue;
822 struct request *req = cmd->request; 822 struct request *req = cmd->request;
823 int clear_errors = 1; 823 int clear_errors = 1;
824 struct scsi_sense_hdr sshdr; 824 struct scsi_sense_hdr sshdr;
@@ -1038,7 +1038,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
1038 return BLKPREP_KILL; 1038 return BLKPREP_KILL;
1039} 1039}
1040 1040
1041static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, 1041static int scsi_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
1042 sector_t *error_sector) 1042 sector_t *error_sector)
1043{ 1043{
1044 struct scsi_device *sdev = q->queuedata; 1044 struct scsi_device *sdev = q->queuedata;
@@ -1340,7 +1340,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1340/* 1340/*
1341 * Kill a request for a dead device 1341 * Kill a request for a dead device
1342 */ 1342 */
1343static void scsi_kill_request(struct request *req, request_queue_t *q) 1343static void scsi_kill_request(struct request *req, struct request_queue *q)
1344{ 1344{
1345 struct scsi_cmnd *cmd = req->special; 1345 struct scsi_cmnd *cmd = req->special;
1346 struct scsi_device *sdev = cmd->device; 1346 struct scsi_device *sdev = cmd->device;
@@ -2119,7 +2119,7 @@ EXPORT_SYMBOL(scsi_target_resume);
2119int 2119int
2120scsi_internal_device_block(struct scsi_device *sdev) 2120scsi_internal_device_block(struct scsi_device *sdev)
2121{ 2121{
2122 request_queue_t *q = sdev->request_queue; 2122 struct request_queue *q = sdev->request_queue;
2123 unsigned long flags; 2123 unsigned long flags;
2124 int err = 0; 2124 int err = 0;
2125 2125
@@ -2159,7 +2159,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2159int 2159int
2160scsi_internal_device_unblock(struct scsi_device *sdev) 2160scsi_internal_device_unblock(struct scsi_device *sdev)
2161{ 2161{
2162 request_queue_t *q = sdev->request_queue; 2162 struct request_queue *q = sdev->request_queue;
2163 int err; 2163 int err;
2164 unsigned long flags; 2164 unsigned long flags;
2165 2165
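[Editor's illustration] scsi_internal_device_block/unblock above bracket the queue with blk_stop_queue/blk_start_queue under the queue lock; the same quiesce pattern applies to any driver, sketched here with hypothetical helper names:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void example_quiesce(struct request_queue *q)
{
	unsigned long flags;

	/* blk_stop_queue() must be called with q->queue_lock held */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void example_resume(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}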
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 424d557284a..e21c7142a3e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -814,7 +814,7 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
814 return ret; 814 return ret;
815} 815}
816 816
817static void sd_prepare_flush(request_queue_t *q, struct request *rq) 817static void sd_prepare_flush(struct request_queue *q, struct request *rq)
818{ 818{
819 memset(rq->cmd, 0, sizeof(rq->cmd)); 819 memset(rq->cmd, 0, sizeof(rq->cmd));
820 rq->cmd_type = REQ_TYPE_BLOCK_PC; 820 rq->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1285,7 +1285,7 @@ got_data:
1285 */ 1285 */
1286 int hard_sector = sector_size; 1286 int hard_sector = sector_size;
1287 sector_t sz = (sdkp->capacity/2) * (hard_sector/256); 1287 sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
1288 request_queue_t *queue = sdp->request_queue; 1288 struct request_queue *queue = sdp->request_queue;
1289 sector_t mb = sz; 1289 sector_t mb = sz;
1290 1290
1291 blk_queue_hardsect_size(queue, hard_sector); 1291 blk_queue_hardsect_size(queue, hard_sector);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e7b6a7fde1c..902eb11ffe8 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -624,7 +624,7 @@ static void get_sectorsize(struct scsi_cd *cd)
624 unsigned char *buffer; 624 unsigned char *buffer;
625 int the_result, retries = 3; 625 int the_result, retries = 3;
626 int sector_size; 626 int sector_size;
627 request_queue_t *queue; 627 struct request_queue *queue;
628 628
629 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 629 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
630 if (!buffer) 630 if (!buffer)
diff --git a/fs/bio.c b/fs/bio.c
index 0d2c2d38b7b..29a44c1b64c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -230,7 +230,7 @@ void bio_put(struct bio *bio)
230 } 230 }
231} 231}
232 232
233inline int bio_phys_segments(request_queue_t *q, struct bio *bio) 233inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
234{ 234{
235 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 235 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
236 blk_recount_segments(q, bio); 236 blk_recount_segments(q, bio);
@@ -238,7 +238,7 @@ inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
238 return bio->bi_phys_segments; 238 return bio->bi_phys_segments;
239} 239}
240 240
241inline int bio_hw_segments(request_queue_t *q, struct bio *bio) 241inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
242{ 242{
243 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 243 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
244 blk_recount_segments(q, bio); 244 blk_recount_segments(q, bio);
@@ -257,7 +257,7 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
257 */ 257 */
258void __bio_clone(struct bio *bio, struct bio *bio_src) 258void __bio_clone(struct bio *bio, struct bio *bio_src)
259{ 259{
260 request_queue_t *q = bdev_get_queue(bio_src->bi_bdev); 260 struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
261 261
262 memcpy(bio->bi_io_vec, bio_src->bi_io_vec, 262 memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
263 bio_src->bi_max_vecs * sizeof(struct bio_vec)); 263 bio_src->bi_max_vecs * sizeof(struct bio_vec));
@@ -303,7 +303,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
303 */ 303 */
304int bio_get_nr_vecs(struct block_device *bdev) 304int bio_get_nr_vecs(struct block_device *bdev)
305{ 305{
306 request_queue_t *q = bdev_get_queue(bdev); 306 struct request_queue *q = bdev_get_queue(bdev);
307 int nr_pages; 307 int nr_pages;
308 308
309 nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; 309 nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -315,7 +315,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
315 return nr_pages; 315 return nr_pages;
316} 316}
317 317
318static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page 318static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
319 *page, unsigned int len, unsigned int offset, 319 *page, unsigned int len, unsigned int offset,
320 unsigned short max_sectors) 320 unsigned short max_sectors)
321{ 321{
@@ -425,7 +425,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
425 * smaller than PAGE_SIZE, so it is always possible to add a single 425 * smaller than PAGE_SIZE, so it is always possible to add a single
426 * page to an empty bio. This should only be used by REQ_PC bios. 426 * page to an empty bio. This should only be used by REQ_PC bios.
427 */ 427 */
428int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page, 428int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
429 unsigned int len, unsigned int offset) 429 unsigned int len, unsigned int offset)
430{ 430{
431 return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); 431 return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
@@ -523,7 +523,7 @@ int bio_uncopy_user(struct bio *bio)
523 * to/from kernel pages as necessary. Must be paired with 523 * to/from kernel pages as necessary. Must be paired with
524 * call bio_uncopy_user() on io completion. 524 * call bio_uncopy_user() on io completion.
525 */ 525 */
526struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, 526struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
527 unsigned int len, int write_to_vm) 527 unsigned int len, int write_to_vm)
528{ 528{
529 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 529 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -600,7 +600,7 @@ out_bmd:
600 return ERR_PTR(ret); 600 return ERR_PTR(ret);
601} 601}
602 602
603static struct bio *__bio_map_user_iov(request_queue_t *q, 603static struct bio *__bio_map_user_iov(struct request_queue *q,
604 struct block_device *bdev, 604 struct block_device *bdev,
605 struct sg_iovec *iov, int iov_count, 605 struct sg_iovec *iov, int iov_count,
606 int write_to_vm) 606 int write_to_vm)
@@ -712,7 +712,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
712 712
713/** 713/**
714 * bio_map_user - map user address into bio 714 * bio_map_user - map user address into bio
715 * @q: the request_queue_t for the bio 715 * @q: the struct request_queue for the bio
716 * @bdev: destination block device 716 * @bdev: destination block device
717 * @uaddr: start of user address 717 * @uaddr: start of user address
718 * @len: length in bytes 718 * @len: length in bytes
@@ -721,7 +721,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
721 * Map the user space address into a bio suitable for io to a block 721 * Map the user space address into a bio suitable for io to a block
722 * device. Returns an error pointer in case of error. 722 * device. Returns an error pointer in case of error.
723 */ 723 */
724struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev, 724struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
725 unsigned long uaddr, unsigned int len, int write_to_vm) 725 unsigned long uaddr, unsigned int len, int write_to_vm)
726{ 726{
727 struct sg_iovec iov; 727 struct sg_iovec iov;
@@ -734,7 +734,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
734 734
735/** 735/**
736 * bio_map_user_iov - map user sg_iovec table into bio 736 * bio_map_user_iov - map user sg_iovec table into bio
737 * @q: the request_queue_t for the bio 737 * @q: the struct request_queue for the bio
738 * @bdev: destination block device 738 * @bdev: destination block device
739 * @iov: the iovec. 739 * @iov: the iovec.
740 * @iov_count: number of elements in the iovec 740 * @iov_count: number of elements in the iovec
@@ -743,7 +743,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
743 * Map the user space address into a bio suitable for io to a block 743 * Map the user space address into a bio suitable for io to a block
744 * device. Returns an error pointer in case of error. 744 * device. Returns an error pointer in case of error.
745 */ 745 */
746struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, 746struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
747 struct sg_iovec *iov, int iov_count, 747 struct sg_iovec *iov, int iov_count,
748 int write_to_vm) 748 int write_to_vm)
749{ 749{
@@ -808,7 +808,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
808} 808}
809 809
810 810
811static struct bio *__bio_map_kern(request_queue_t *q, void *data, 811static struct bio *__bio_map_kern(struct request_queue *q, void *data,
812 unsigned int len, gfp_t gfp_mask) 812 unsigned int len, gfp_t gfp_mask)
813{ 813{
814 unsigned long kaddr = (unsigned long)data; 814 unsigned long kaddr = (unsigned long)data;
@@ -847,7 +847,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
847 847
848/** 848/**
849 * bio_map_kern - map kernel address into bio 849 * bio_map_kern - map kernel address into bio
850 * @q: the request_queue_t for the bio 850 * @q: the struct request_queue for the bio
851 * @data: pointer to buffer to map 851 * @data: pointer to buffer to map
852 * @len: length in bytes 852 * @len: length in bytes
853 * @gfp_mask: allocation flags for bio allocation 853 * @gfp_mask: allocation flags for bio allocation
@@ -855,7 +855,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
855 * Map the kernel address into a bio suitable for io to a block 855 * Map the kernel address into a bio suitable for io to a block
856 * device. Returns an error pointer in case of error. 856 * device. Returns an error pointer in case of error.
857 */ 857 */
858struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len, 858struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
859 gfp_t gfp_mask) 859 gfp_t gfp_mask)
860{ 860{
861 struct bio *bio; 861 struct bio *bio;
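[Editor's illustration] The fs/bio.c mapping helpers keep their behaviour; only the queue parameter's spelling changes. A hedged usage sketch for bio_map_kern (buffer lifetime and submission are the caller's problem and omitted):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct bio *example_map_buffer(struct block_device *bdev,
				      void *buf, unsigned int len)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio;

	bio = bio_map_kern(q, buf, len, GFP_KERNEL);	/* ERR_PTR on failure */
	if (IS_ERR(bio))
		return NULL;
	bio->bi_bdev = bdev;
	return bio;
}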
diff --git a/include/asm-arm/arch-omap/mailbox.h b/include/asm-arm/arch-omap/mailbox.h
index 4bf0909461f..7cbed9332e1 100644
--- a/include/asm-arm/arch-omap/mailbox.h
+++ b/include/asm-arm/arch-omap/mailbox.h
@@ -37,7 +37,7 @@ struct omap_mbox_ops {
37 37
38struct omap_mbox_queue { 38struct omap_mbox_queue {
39 spinlock_t lock; 39 spinlock_t lock;
40 request_queue_t *queue; 40 struct request_queue *queue;
41 struct work_struct work; 41 struct work_struct work;
42 int (*callback)(void *); 42 int (*callback)(void *);
43 struct omap_mbox *mbox; 43 struct omap_mbox *mbox;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 695e34964cb..4be37de0205 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -37,7 +37,6 @@
37struct scsi_ioctl_command; 37struct scsi_ioctl_command;
38 38
39struct request_queue; 39struct request_queue;
40typedef struct request_queue request_queue_t;
41struct elevator_queue; 40struct elevator_queue;
42typedef struct elevator_queue elevator_t; 41typedef struct elevator_queue elevator_t;
43struct request_pm_state; 42struct request_pm_state;
@@ -233,7 +232,7 @@ struct request {
233 struct list_head queuelist; 232 struct list_head queuelist;
234 struct list_head donelist; 233 struct list_head donelist;
235 234
236 request_queue_t *q; 235 struct request_queue *q;
237 236
238 unsigned int cmd_flags; 237 unsigned int cmd_flags;
239 enum rq_cmd_type_bits cmd_type; 238 enum rq_cmd_type_bits cmd_type;
@@ -337,15 +336,15 @@ struct request_pm_state
337 336
338#include <linux/elevator.h> 337#include <linux/elevator.h>
339 338
340typedef void (request_fn_proc) (request_queue_t *q); 339typedef void (request_fn_proc) (struct request_queue *q);
341typedef int (make_request_fn) (request_queue_t *q, struct bio *bio); 340typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
342typedef int (prep_rq_fn) (request_queue_t *, struct request *); 341typedef int (prep_rq_fn) (struct request_queue *, struct request *);
343typedef void (unplug_fn) (request_queue_t *); 342typedef void (unplug_fn) (struct request_queue *);
344 343
345struct bio_vec; 344struct bio_vec;
346typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *); 345typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
347typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *); 346typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
348typedef void (prepare_flush_fn) (request_queue_t *, struct request *); 347typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
349typedef void (softirq_done_fn)(struct request *); 348typedef void (softirq_done_fn)(struct request *);
350 349
351enum blk_queue_state { 350enum blk_queue_state {
@@ -626,13 +625,13 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
626 625
627#ifdef CONFIG_BOUNCE 626#ifdef CONFIG_BOUNCE
628extern int init_emergency_isa_pool(void); 627extern int init_emergency_isa_pool(void);
629extern void blk_queue_bounce(request_queue_t *q, struct bio **bio); 628extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
630#else 629#else
631static inline int init_emergency_isa_pool(void) 630static inline int init_emergency_isa_pool(void)
632{ 631{
633 return 0; 632 return 0;
634} 633}
635static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio) 634static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
636{ 635{
637} 636}
638#endif /* CONFIG_MMU */ 637#endif /* CONFIG_MMU */
@@ -646,14 +645,14 @@ extern void blk_unregister_queue(struct gendisk *disk);
646extern void register_disk(struct gendisk *dev); 645extern void register_disk(struct gendisk *dev);
647extern void generic_make_request(struct bio *bio); 646extern void generic_make_request(struct bio *bio);
648extern void blk_put_request(struct request *); 647extern void blk_put_request(struct request *);
649extern void __blk_put_request(request_queue_t *, struct request *); 648extern void __blk_put_request(struct request_queue *, struct request *);
650extern void blk_end_sync_rq(struct request *rq, int error); 649extern void blk_end_sync_rq(struct request *rq, int error);
651extern struct request *blk_get_request(request_queue_t *, int, gfp_t); 650extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
652extern void blk_insert_request(request_queue_t *, struct request *, int, void *); 651extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
653extern void blk_requeue_request(request_queue_t *, struct request *); 652extern void blk_requeue_request(struct request_queue *, struct request *);
654extern void blk_plug_device(request_queue_t *); 653extern void blk_plug_device(struct request_queue *);
655extern int blk_remove_plug(request_queue_t *); 654extern int blk_remove_plug(struct request_queue *);
656extern void blk_recount_segments(request_queue_t *, struct bio *); 655extern void blk_recount_segments(struct request_queue *, struct bio *);
657extern int scsi_cmd_ioctl(struct file *, struct request_queue *, 656extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
658 struct gendisk *, unsigned int, void __user *); 657 struct gendisk *, unsigned int, void __user *);
659extern int sg_scsi_ioctl(struct file *, struct request_queue *, 658extern int sg_scsi_ioctl(struct file *, struct request_queue *,
@@ -662,14 +661,15 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
662/* 661/*
663 * Temporary export, until SCSI gets fixed up. 662 * Temporary export, until SCSI gets fixed up.
664 */ 663 */
665extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *); 664extern int ll_back_merge_fn(struct request_queue *, struct request *,
665 struct bio *);
666 666
667/* 667/*
668 * A queue has just exited congestion. Note this in the global counter of 668 * A queue has just exited congestion. Note this in the global counter of
669 * congested queues, and wake up anyone who was waiting for requests to be 669 * congested queues, and wake up anyone who was waiting for requests to be
670 * put back. 670 * put back.
671 */ 671 */
672static inline void blk_clear_queue_congested(request_queue_t *q, int rw) 672static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
673{ 673{
674 clear_bdi_congested(&q->backing_dev_info, rw); 674 clear_bdi_congested(&q->backing_dev_info, rw);
675} 675}
@@ -678,29 +678,29 @@ static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
678 * A queue has just entered congestion. Flag that in the queue's VM-visible 678 * A queue has just entered congestion. Flag that in the queue's VM-visible
679 * state flags and increment the global counter of congested queues. 679 * state flags and increment the global counter of congested queues.
680 */ 680 */
681static inline void blk_set_queue_congested(request_queue_t *q, int rw) 681static inline void blk_set_queue_congested(struct request_queue *q, int rw)
682{ 682{
683 set_bdi_congested(&q->backing_dev_info, rw); 683 set_bdi_congested(&q->backing_dev_info, rw);
684} 684}
685 685
686extern void blk_start_queue(request_queue_t *q); 686extern void blk_start_queue(struct request_queue *q);
687extern void blk_stop_queue(request_queue_t *q); 687extern void blk_stop_queue(struct request_queue *q);
688extern void blk_sync_queue(struct request_queue *q); 688extern void blk_sync_queue(struct request_queue *q);
689extern void __blk_stop_queue(request_queue_t *q); 689extern void __blk_stop_queue(struct request_queue *q);
690extern void blk_run_queue(request_queue_t *); 690extern void blk_run_queue(struct request_queue *);
691extern void blk_start_queueing(request_queue_t *); 691extern void blk_start_queueing(struct request_queue *);
692extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long); 692extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
693extern int blk_rq_unmap_user(struct bio *); 693extern int blk_rq_unmap_user(struct bio *);
694extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t); 694extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
695extern int blk_rq_map_user_iov(request_queue_t *, struct request *, 695extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
696 struct sg_iovec *, int, unsigned int); 696 struct sg_iovec *, int, unsigned int);
697extern int blk_execute_rq(request_queue_t *, struct gendisk *, 697extern int blk_execute_rq(struct request_queue *, struct gendisk *,
698 struct request *, int); 698 struct request *, int);
699extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *, 699extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
700 struct request *, int, rq_end_io_fn *); 700 struct request *, int, rq_end_io_fn *);
701extern int blk_verify_command(unsigned char *, int); 701extern int blk_verify_command(unsigned char *, int);
702 702
703static inline request_queue_t *bdev_get_queue(struct block_device *bdev) 703static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
704{ 704{
705 return bdev->bd_disk->queue; 705 return bdev->bd_disk->queue;
706} 706}
@@ -749,41 +749,41 @@ static inline void blkdev_dequeue_request(struct request *req)
749/* 749/*
750 * Access functions for manipulating queue properties 750 * Access functions for manipulating queue properties
751 */ 751 */
752extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn, 752extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
753 spinlock_t *lock, int node_id); 753 spinlock_t *lock, int node_id);
754extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *); 754extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
755extern void blk_cleanup_queue(request_queue_t *); 755extern void blk_cleanup_queue(struct request_queue *);
756extern void blk_queue_make_request(request_queue_t *, make_request_fn *); 756extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
757extern void blk_queue_bounce_limit(request_queue_t *, u64); 757extern void blk_queue_bounce_limit(struct request_queue *, u64);
758extern void blk_queue_max_sectors(request_queue_t *, unsigned int); 758extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
759extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short); 759extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
760extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short); 760extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
761extern void blk_queue_max_segment_size(request_queue_t *, unsigned int); 761extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
762extern void blk_queue_hardsect_size(request_queue_t *, unsigned short); 762extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
763extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b); 763extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
764extern void blk_queue_segment_boundary(request_queue_t *, unsigned long); 764extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
765extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn); 765extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
766extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *); 766extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
767extern void blk_queue_dma_alignment(request_queue_t *, int); 767extern void blk_queue_dma_alignment(struct request_queue *, int);
768extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *); 768extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
769extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 769extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
770extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *); 770extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
771extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *); 771extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
772extern int blk_do_ordered(request_queue_t *, struct request **); 772extern int blk_do_ordered(struct request_queue *, struct request **);
773extern unsigned blk_ordered_cur_seq(request_queue_t *); 773extern unsigned blk_ordered_cur_seq(struct request_queue *);
774extern unsigned blk_ordered_req_seq(struct request *); 774extern unsigned blk_ordered_req_seq(struct request *);
775extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int); 775extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
776 776
777extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); 777extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
778extern void blk_dump_rq_flags(struct request *, char *); 778extern void blk_dump_rq_flags(struct request *, char *);
779extern void generic_unplug_device(request_queue_t *); 779extern void generic_unplug_device(struct request_queue *);
780extern void __generic_unplug_device(request_queue_t *); 780extern void __generic_unplug_device(struct request_queue *);
781extern long nr_blockdev_pages(void); 781extern long nr_blockdev_pages(void);
782 782
783int blk_get_queue(request_queue_t *); 783int blk_get_queue(struct request_queue *);
784request_queue_t *blk_alloc_queue(gfp_t); 784struct request_queue *blk_alloc_queue(gfp_t);
785request_queue_t *blk_alloc_queue_node(gfp_t, int); 785struct request_queue *blk_alloc_queue_node(gfp_t, int);
786extern void blk_put_queue(request_queue_t *); 786extern void blk_put_queue(struct request_queue *);
787 787
788/* 788/*
789 * tag stuff 789 * tag stuff
@@ -791,13 +791,13 @@ extern void blk_put_queue(request_queue_t *);
791#define blk_queue_tag_depth(q) ((q)->queue_tags->busy) 791#define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
792#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) 792#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
793#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) 793#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
794extern int blk_queue_start_tag(request_queue_t *, struct request *); 794extern int blk_queue_start_tag(struct request_queue *, struct request *);
795extern struct request *blk_queue_find_tag(request_queue_t *, int); 795extern struct request *blk_queue_find_tag(struct request_queue *, int);
796extern void blk_queue_end_tag(request_queue_t *, struct request *); 796extern void blk_queue_end_tag(struct request_queue *, struct request *);
797extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *); 797extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
798extern void blk_queue_free_tags(request_queue_t *); 798extern void blk_queue_free_tags(struct request_queue *);
799extern int blk_queue_resize_tags(request_queue_t *, int); 799extern int blk_queue_resize_tags(struct request_queue *, int);
800extern void blk_queue_invalidate_tags(request_queue_t *); 800extern void blk_queue_invalidate_tags(struct request_queue *);
801extern struct blk_queue_tag *blk_init_tags(int); 801extern struct blk_queue_tag *blk_init_tags(int);
802extern void blk_free_tags(struct blk_queue_tag *); 802extern void blk_free_tags(struct blk_queue_tag *);
803 803
@@ -809,7 +809,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
809 return bqt->tag_index[tag]; 809 return bqt->tag_index[tag];
810} 810}
811 811
812extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *); 812extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
813extern int blkdev_issue_flush(struct block_device *, sector_t *); 813extern int blkdev_issue_flush(struct block_device *, sector_t *);
814 814
815#define MAX_PHYS_SEGMENTS 128 815#define MAX_PHYS_SEGMENTS 128
@@ -821,7 +821,7 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
821 821
822#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 822#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
823 823
824static inline int queue_hardsect_size(request_queue_t *q) 824static inline int queue_hardsect_size(struct request_queue *q)
825{ 825{
826 int retval = 512; 826 int retval = 512;
827 827
@@ -836,7 +836,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
836 return queue_hardsect_size(bdev_get_queue(bdev)); 836 return queue_hardsect_size(bdev_get_queue(bdev));
837} 837}
838 838
839static inline int queue_dma_alignment(request_queue_t *q) 839static inline int queue_dma_alignment(struct request_queue *q)
840{ 840{
841 int retval = 511; 841 int retval = 511;
842 842
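[Editor's illustration] The first blkdev.h hunk above deletes the typedef itself, so after this patch any code spelling request_queue_t stops compiling. The mechanical conversion, shown on a congestion check like the md hunks earlier:

#include <linux/backing-dev.h>
#include <linux/blkdev.h>

static int example_queue_congested(struct block_device *bdev, int bits)
{
	/* was: request_queue_t *q = bdev_get_queue(bdev); */
	struct request_queue *q = bdev_get_queue(bdev);

	return bdi_congested(&q->backing_dev_info, bits);
}

An out-of-tree stopgap would be a private "typedef struct request_queue request_queue_t;", but that defeats the point of the cleanup; converting the callers is the intended fix.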
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3680ff9a30e..90874a5d7d7 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,7 +144,7 @@ struct blk_user_trace_setup {
144 144
145#if defined(CONFIG_BLK_DEV_IO_TRACE) 145#if defined(CONFIG_BLK_DEV_IO_TRACE)
146extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 146extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
147extern void blk_trace_shutdown(request_queue_t *); 147extern void blk_trace_shutdown(struct request_queue *);
148extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); 148extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
149 149
150/** 150/**
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e88fcbc77f8..e8f42133a61 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -5,29 +5,29 @@
5 5
6#ifdef CONFIG_BLOCK 6#ifdef CONFIG_BLOCK
7 7
8typedef int (elevator_merge_fn) (request_queue_t *, struct request **, 8typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
9 struct bio *); 9 struct bio *);
10 10
11typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *); 11typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
12 12
13typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int); 13typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
14 14
15typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *); 15typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
16 16
17typedef int (elevator_dispatch_fn) (request_queue_t *, int); 17typedef int (elevator_dispatch_fn) (struct request_queue *, int);
18 18
19typedef void (elevator_add_req_fn) (request_queue_t *, struct request *); 19typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
20typedef int (elevator_queue_empty_fn) (request_queue_t *); 20typedef int (elevator_queue_empty_fn) (struct request_queue *);
21typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *); 21typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
22typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *); 22typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
23typedef int (elevator_may_queue_fn) (request_queue_t *, int); 23typedef int (elevator_may_queue_fn) (struct request_queue *, int);
24 24
25typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t); 25typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
26typedef void (elevator_put_req_fn) (struct request *); 26typedef void (elevator_put_req_fn) (struct request *);
27typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *); 27typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
28typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *); 28typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
29 29
30typedef void *(elevator_init_fn) (request_queue_t *); 30typedef void *(elevator_init_fn) (struct request_queue *);
31typedef void (elevator_exit_fn) (elevator_t *); 31typedef void (elevator_exit_fn) (elevator_t *);
32 32
33struct elevator_ops 33struct elevator_ops
@@ -94,27 +94,27 @@ struct elevator_queue
94/* 94/*
95 * block elevator interface 95 * block elevator interface
96 */ 96 */
97extern void elv_dispatch_sort(request_queue_t *, struct request *); 97extern void elv_dispatch_sort(struct request_queue *, struct request *);
98extern void elv_dispatch_add_tail(request_queue_t *, struct request *); 98extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
99extern void elv_add_request(request_queue_t *, struct request *, int, int); 99extern void elv_add_request(struct request_queue *, struct request *, int, int);
100extern void __elv_add_request(request_queue_t *, struct request *, int, int); 100extern void __elv_add_request(struct request_queue *, struct request *, int, int);
101extern void elv_insert(request_queue_t *, struct request *, int); 101extern void elv_insert(struct request_queue *, struct request *, int);
102extern int elv_merge(request_queue_t *, struct request **, struct bio *); 102extern int elv_merge(struct request_queue *, struct request **, struct bio *);
103extern void elv_merge_requests(request_queue_t *, struct request *, 103extern void elv_merge_requests(struct request_queue *, struct request *,
104 struct request *); 104 struct request *);
105extern void elv_merged_request(request_queue_t *, struct request *, int); 105extern void elv_merged_request(struct request_queue *, struct request *, int);
106extern void elv_dequeue_request(request_queue_t *, struct request *); 106extern void elv_dequeue_request(struct request_queue *, struct request *);
107extern void elv_requeue_request(request_queue_t *, struct request *); 107extern void elv_requeue_request(struct request_queue *, struct request *);
108extern int elv_queue_empty(request_queue_t *); 108extern int elv_queue_empty(struct request_queue *);
109extern struct request *elv_next_request(struct request_queue *q); 109extern struct request *elv_next_request(struct request_queue *q);
110extern struct request *elv_former_request(request_queue_t *, struct request *); 110extern struct request *elv_former_request(struct request_queue *, struct request *);
111extern struct request *elv_latter_request(request_queue_t *, struct request *); 111extern struct request *elv_latter_request(struct request_queue *, struct request *);
112extern int elv_register_queue(request_queue_t *q); 112extern int elv_register_queue(struct request_queue *q);
113extern void elv_unregister_queue(request_queue_t *q); 113extern void elv_unregister_queue(struct request_queue *q);
114extern int elv_may_queue(request_queue_t *, int); 114extern int elv_may_queue(struct request_queue *, int);
115extern void elv_completed_request(request_queue_t *, struct request *); 115extern void elv_completed_request(struct request_queue *, struct request *);
116extern int elv_set_request(request_queue_t *, struct request *, gfp_t); 116extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
117extern void elv_put_request(request_queue_t *, struct request *); 117extern void elv_put_request(struct request_queue *, struct request *);
118 118
119/* 119/*
120 * io scheduler registration 120 * io scheduler registration
@@ -125,18 +125,18 @@ extern void elv_unregister(struct elevator_type *);
125/* 125/*
126 * io scheduler sysfs switching 126 * io scheduler sysfs switching
127 */ 127 */
128extern ssize_t elv_iosched_show(request_queue_t *, char *); 128extern ssize_t elv_iosched_show(struct request_queue *, char *);
129extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t); 129extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
130 130
131extern int elevator_init(request_queue_t *, char *); 131extern int elevator_init(struct request_queue *, char *);
132extern void elevator_exit(elevator_t *); 132extern void elevator_exit(elevator_t *);
133extern int elv_rq_merge_ok(struct request *, struct bio *); 133extern int elv_rq_merge_ok(struct request *, struct bio *);
134 134
135/* 135/*
136 * Helper functions. 136 * Helper functions.
137 */ 137 */
138extern struct request *elv_rb_former_request(request_queue_t *, struct request *); 138extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
139extern struct request *elv_rb_latter_request(request_queue_t *, struct request *); 139extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
140 140
141/* 141/*
142 * rb support functions. 142 * rb support functions.
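[Editor's illustration] Every hook in elevator_ops now takes struct request_queue *. In the style of noop-iosched, which this patch also converts, a minimal FIFO scheduler against the renamed prototypes; the 'example' names are hypothetical:

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/list.h>

struct example_data {
	struct list_head queue;		/* FIFO of pending requests */
};

static void example_add_request(struct request_queue *q, struct request *rq)
{
	struct example_data *ed = q->elevator->elevator_data;

	list_add_tail(&rq->queuelist, &ed->queue);
}

static int example_dispatch(struct request_queue *q, int force)
{
	struct example_data *ed = q->elevator->elevator_data;

	if (!list_empty(&ed->queue)) {
		struct request *rq;

		rq = list_entry(ed->queue.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		elv_dispatch_sort(q, rq);
		return 1;
	}
	return 0;
}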
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 5f5daad8bc5..d71d0121b7f 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -555,7 +555,7 @@ typedef struct ide_drive_s {
555 char name[4]; /* drive name, such as "hda" */ 555 char name[4]; /* drive name, such as "hda" */
556 char driver_req[10]; /* requests specific driver */ 556 char driver_req[10]; /* requests specific driver */
557 557
558 request_queue_t *queue; /* request queue */ 558 struct request_queue *queue; /* request queue */
559 559
560 struct request *rq; /* current request */ 560 struct request *rq; /* current request */
561 struct ide_drive_s *next; /* circular list of hwgroup drives */ 561 struct ide_drive_s *next; /* circular list of hwgroup drives */
@@ -1206,7 +1206,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
1206extern int ide_spin_wait_hwgroup(ide_drive_t *); 1206extern int ide_spin_wait_hwgroup(ide_drive_t *);
1207extern void ide_timer_expiry(unsigned long); 1207extern void ide_timer_expiry(unsigned long);
1208extern irqreturn_t ide_intr(int irq, void *dev_id); 1208extern irqreturn_t ide_intr(int irq, void *dev_id);
1209extern void do_ide_request(request_queue_t *); 1209extern void do_ide_request(struct request_queue *);
1210 1210
1211void ide_init_disk(struct gendisk *, ide_drive_t *); 1211void ide_init_disk(struct gendisk *, ide_drive_t *);
1212 1212
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 0b99b31f017..26a0a103898 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -63,7 +63,7 @@ struct loop_device {
63 struct task_struct *lo_thread; 63 struct task_struct *lo_thread;
64 wait_queue_head_t lo_event; 64 wait_queue_head_t lo_event;
65 65
66 request_queue_t *lo_queue; 66 struct request_queue *lo_queue;
67 struct gendisk *lo_disk; 67 struct gendisk *lo_disk;
68 struct list_head lo_list; 68 struct list_head lo_list;
69}; 69};
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 28ac632b42d..dcb729244f4 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -227,7 +227,7 @@ struct mddev_s
227 unsigned int safemode_delay; 227 unsigned int safemode_delay;
228 struct timer_list safemode_timer; 228 struct timer_list safemode_timer;
229 atomic_t writes_pending; 229 atomic_t writes_pending;
230 request_queue_t *queue; /* for plugging ... */ 230 struct request_queue *queue; /* for plugging ... */
231 231
232 atomic_t write_behind; /* outstanding async IO */ 232 atomic_t write_behind; /* outstanding async IO */
233 unsigned int max_write_behind; /* 0 = sync */ 233 unsigned int max_write_behind; /* 0 = sync */
@@ -265,7 +265,7 @@ struct mdk_personality
265 int level; 265 int level;
266 struct list_head list; 266 struct list_head list;
267 struct module *owner; 267 struct module *owner;
268 int (*make_request)(request_queue_t *q, struct bio *bio); 268 int (*make_request)(struct request_queue *q, struct bio *bio);
269 int (*run)(mddev_t *mddev); 269 int (*run)(mddev_t *mddev);
270 int (*stop)(mddev_t *mddev); 270 int (*stop)(mddev_t *mddev);
271 void (*status)(struct seq_file *seq, mddev_t *mddev); 271 void (*status)(struct seq_file *seq, mddev_t *mddev);
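[Editor's illustration] For md, the function-pointer change lands in mdk_personality above. What a personality registration looks like afterwards; the bodies are hypothetical, with raid1.c's raid1_personality as the in-tree model (plus the usual md headers):

#include <linux/raid/md_k.h>
#include <linux/seq_file.h>

static int example_make_request(struct request_queue *q, struct bio *bio);
static int example_run(mddev_t *mddev);
static int example_stop(mddev_t *mddev);
static void example_status(struct seq_file *seq, mddev_t *mddev);

static struct mdk_personality example_personality = {
	.name		= "example",
	.level		= -1,			/* hypothetical level number */
	.owner		= THIS_MODULE,
	.make_request	= example_make_request,	/* takes struct request_queue * now */
	.run		= example_run,
	.stop		= example_stop,
	.status		= example_status,
};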
diff --git a/include/scsi/sd.h b/include/scsi/sd.h
index 5261488e110..78583fee0ab 100644
--- a/include/scsi/sd.h
+++ b/include/scsi/sd.h
@@ -57,7 +57,7 @@ static int sd_resume(struct device *dev);
57static void sd_rescan(struct device *); 57static void sd_rescan(struct device *);
58static int sd_init_command(struct scsi_cmnd *); 58static int sd_init_command(struct scsi_cmnd *);
59static int sd_issue_flush(struct device *, sector_t *); 59static int sd_issue_flush(struct device *, sector_t *);
60static void sd_prepare_flush(request_queue_t *, struct request *); 60static void sd_prepare_flush(struct request_queue *, struct request *);
61static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); 61static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
62static void scsi_disk_release(struct class_device *cdev); 62static void scsi_disk_release(struct class_device *cdev);
63static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); 63static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
diff --git a/mm/bounce.c b/mm/bounce.c
index ad401fc5744..179fe38a241 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -190,7 +190,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
190 return 0; 190 return 0;
191} 191}
192 192
193static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, 193static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
194 mempool_t *pool) 194 mempool_t *pool)
195{ 195{
196 struct page *page; 196 struct page *page;
@@ -275,7 +275,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
275 *bio_orig = bio; 275 *bio_orig = bio;
276} 276}
277 277
278void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) 278void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
279{ 279{
280 mempool_t *pool; 280 mempool_t *pool;
281 281
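[Editor's illustration] Finally, mm/bounce.c: blk_queue_bounce() may replace *bio_orig with a bounced clone, which is why it takes struct bio **. The typical call site in a driver's make_request path, sketched with a hypothetical dispatch helper:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_dispatch_bio(void *driver_data, struct bio *bio);	/* hypothetical */

static int example_bounce_make_request(struct request_queue *q, struct bio *bio)
{
	/* May swap 'bio' for a clone whose highmem pages were copied below
	 * the limit set earlier via blk_queue_bounce_limit(). */
	blk_queue_bounce(q, &bio);
	example_dispatch_bio(q->queuedata, bio);
	return 0;
}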