Diffstat (limited to 'block')

 block/as-iosched.c       |  26
 block/blktrace.c         |  10
 block/bsg.c              |  12
 block/cfq-iosched.c      |  39
 block/deadline-iosched.c |  18
 block/elevator.c         |  75
 block/ll_rw_blk.c        | 215
 block/noop-iosched.c     |  14
 block/scsi_ioctl.c       |  24

 9 files changed, 226 insertions(+), 207 deletions(-)
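Every hunk that follows makes the same mechanical substitution: the request_queue_t typedef is written out as struct request_queue, with lines re-wrapped where the longer spelling overflows 80 columns. A minimal sketch of why this is purely textual, assuming the alias was declared along these lines (illustrative, not a copy of the kernel header):

    /* Assumed shape of the alias being retired: */
    struct request_queue;                          /* the underlying type */
    typedef struct request_queue request_queue_t;  /* alias being removed */

    /* The old spelling, written against the alias ... */
    int as_queue_empty(request_queue_t *q);

    /* ... and the new spelling. Both declare the identical type, so each
     * hunk below changes spelling only, never behaviour. */
    int as_queue_empty(struct request_queue *q);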
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 3e316dd72529..dc715a562e14 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -796,7 +796,7 @@ static void update_write_batch(struct as_data *ad)
  * as_completed_request is to be called when a request has completed and
  * returned something to the requesting process, be it an error or data.
  */
-static void as_completed_request(request_queue_t *q, struct request *rq)
+static void as_completed_request(struct request_queue *q, struct request *rq)
 {
         struct as_data *ad = q->elevator->elevator_data;

@@ -853,7 +853,8 @@ out:
  * reference unless it replaces the request at somepart of the elevator
  * (ie. the dispatch queue)
  */
-static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+static void as_remove_queued_request(struct request_queue *q,
+                                     struct request *rq)
 {
         const int data_dir = rq_is_sync(rq);
         struct as_data *ad = q->elevator->elevator_data;
@@ -978,7 +979,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(request_queue_t *q, int force)
+static int as_dispatch_request(struct request_queue *q, int force)
 {
         struct as_data *ad = q->elevator->elevator_data;
         const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
@@ -1139,7 +1140,7 @@ fifo_expired:
 /*
  * add rq to rbtree and fifo
  */
-static void as_add_request(request_queue_t *q, struct request *rq)
+static void as_add_request(struct request_queue *q, struct request *rq)
 {
         struct as_data *ad = q->elevator->elevator_data;
         int data_dir;
@@ -1167,7 +1168,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
         RQ_SET_STATE(rq, AS_RQ_QUEUED);
 }

-static void as_activate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(struct request_queue *q, struct request *rq)
 {
         WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
         RQ_SET_STATE(rq, AS_RQ_REMOVED);
@@ -1175,7 +1176,7 @@ static void as_activate_request(request_queue_t *q, struct request *rq)
                 atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
 }

-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_deactivate_request(struct request_queue *q, struct request *rq)
 {
         WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
         RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
@@ -1189,7 +1190,7 @@ static void as_deactivate_request(request_queue_t *q, struct request *rq)
  * is not empty - it is used in the block layer to check for plugging and
  * merging opportunities
  */
-static int as_queue_empty(request_queue_t *q)
+static int as_queue_empty(struct request_queue *q)
 {
         struct as_data *ad = q->elevator->elevator_data;

@@ -1198,7 +1199,7 @@ static int as_queue_empty(request_queue_t *q)
 }

 static int
-as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+as_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
         struct as_data *ad = q->elevator->elevator_data;
         sector_t rb_key = bio->bi_sector + bio_sectors(bio);
@@ -1216,7 +1217,8 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
         return ELEVATOR_NO_MERGE;
 }

-static void as_merged_request(request_queue_t *q, struct request *req, int type)
+static void as_merged_request(struct request_queue *q, struct request *req,
+                              int type)
 {
         struct as_data *ad = q->elevator->elevator_data;

@@ -1234,7 +1236,7 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type)
         }
 }

-static void as_merged_requests(request_queue_t *q, struct request *req,
+static void as_merged_requests(struct request_queue *q, struct request *req,
                                struct request *next)
 {
         /*
@@ -1285,7 +1287,7 @@ static void as_work_handler(struct work_struct *work)
         spin_unlock_irqrestore(q->queue_lock, flags);
 }

-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(struct request_queue *q, int rw)
 {
         int ret = ELV_MQUEUE_MAY;
         struct as_data *ad = q->elevator->elevator_data;
@@ -1318,7 +1320,7 @@ static void as_exit_queue(elevator_t *e)
 /*
  * initialize elevator private data (as_data).
  */
-static void *as_init_queue(request_queue_t *q)
+static void *as_init_queue(struct request_queue *q)
 {
         struct as_data *ad;

diff --git a/block/blktrace.c b/block/blktrace.c
index 3f0e7c37c059..20c3e22587b5 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -231,7 +231,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
         kfree(bt);
 }

-static int blk_trace_remove(request_queue_t *q)
+static int blk_trace_remove(struct request_queue *q)
 {
         struct blk_trace *bt;

@@ -312,7 +312,7 @@ static struct rchan_callbacks blk_relay_callbacks = {
 /*
  * Setup everything required to start tracing
  */
-static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
                            char __user *arg)
 {
         struct blk_user_trace_setup buts;
@@ -401,7 +401,7 @@ err:
         return ret;
 }

-static int blk_trace_startstop(request_queue_t *q, int start)
+static int blk_trace_startstop(struct request_queue *q, int start)
 {
         struct blk_trace *bt;
         int ret;
@@ -444,7 +444,7 @@ static int blk_trace_startstop(request_queue_t *q, int start)
 **/
 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 {
-        request_queue_t *q;
+        struct request_queue *q;
         int ret, start = 0;

         q = bdev_get_queue(bdev);
@@ -479,7 +479,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  * @q: the request queue associated with the device
  *
 **/
-void blk_trace_shutdown(request_queue_t *q)
+void blk_trace_shutdown(struct request_queue *q)
 {
         if (q->blk_trace) {
                 blk_trace_startstop(q, 0);
diff --git a/block/bsg.c b/block/bsg.c
index b571869928a8..3b2f05258a92 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,7 @@
 #define BSG_VERSION "0.4"

 struct bsg_device {
-        request_queue_t *queue;
+        struct request_queue *queue;
         spinlock_t lock;
         struct list_head busy_list;
         struct list_head done_list;
@@ -180,7 +180,7 @@ unlock:
         return ret;
 }

-static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
                                 struct sg_io_v4 *hdr, int has_write_perm)
 {
         memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -214,7 +214,7 @@ static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
  * Check if sg_io_v4 from user is allowed and valid
  */
 static int
-bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
 {
         int ret = 0;

@@ -250,7 +250,7 @@ bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
 static struct request *
 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
-        request_queue_t *q = bd->queue;
+        struct request_queue *q = bd->queue;
         struct request *rq, *next_rq = NULL;
         int ret, rw;
         unsigned int dxfer_len;
@@ -345,7 +345,7 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
  * do final setup of a 'bc' and submit the matching 'rq' to the block
  * layer for io
  */
-static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
                             struct bsg_command *bc, struct request *rq)
 {
         rq->sense = bc->sense;
@@ -611,7 +611,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
         bc = NULL;
         ret = 0;
         while (nr_commands) {
-                request_queue_t *q = bd->queue;
+                struct request_queue *q = bd->queue;

                 bc = bsg_alloc_command(bd);
                 if (IS_ERR(bc)) {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d148ccbc36d1..54dc05439009 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,7 +71,7 @@ struct cfq_rb_root {
  * Per block device queue structure
  */
 struct cfq_data {
-        request_queue_t *queue;
+        struct request_queue *queue;

         /*
          * rr list of queues with requests and the count of them
@@ -197,7 +197,7 @@ CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS

-static void cfq_dispatch_insert(request_queue_t *, struct request *);
+static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
                                        struct task_struct *, gfp_t);
 static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
@@ -237,7 +237,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
                 kblockd_schedule_work(&cfqd->unplug_work);
 }

-static int cfq_queue_empty(request_queue_t *q)
+static int cfq_queue_empty(struct request_queue *q)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;

@@ -623,7 +623,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
         return NULL;
 }

-static void cfq_activate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(struct request_queue *q, struct request *rq)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;

@@ -641,7 +641,7 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
         cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
 }

-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;

@@ -665,7 +665,8 @@ static void cfq_remove_request(struct request *rq)
         }
 }

-static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+static int cfq_merge(struct request_queue *q, struct request **req,
+                     struct bio *bio)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct request *__rq;
@@ -679,7 +680,7 @@ static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
         return ELEVATOR_NO_MERGE;
 }

-static void cfq_merged_request(request_queue_t *q, struct request *req,
+static void cfq_merged_request(struct request_queue *q, struct request *req,
                                int type)
 {
         if (type == ELEVATOR_FRONT_MERGE) {
@@ -690,7 +691,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req,
 }

 static void
-cfq_merged_requests(request_queue_t *q, struct request *rq,
+cfq_merged_requests(struct request_queue *q, struct request *rq,
                     struct request *next)
 {
         /*
@@ -703,7 +704,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
         cfq_remove_request(next);
 }

-static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+static int cfq_allow_merge(struct request_queue *q, struct request *rq,
                            struct bio *bio)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -913,7 +914,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 /*
  * Move request from internal lists to the request queue dispatch list.
  */
-static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
+static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1093,7 +1094,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
         return dispatched;
 }

-static int cfq_dispatch_requests(request_queue_t *q, int force)
+static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct cfq_queue *cfqq;
@@ -1214,7 +1215,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
         struct cfq_data *cfqd = cic->key;

         if (cfqd) {
-                request_queue_t *q = cfqd->queue;
+                struct request_queue *q = cfqd->queue;

                 spin_lock_irq(q->queue_lock);
                 __cfq_exit_single_io_context(cfqd, cic);
@@ -1775,7 +1776,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         }
 }

-static void cfq_insert_request(request_queue_t *q, struct request *rq)
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1789,7 +1790,7 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
         cfq_rq_enqueued(cfqd, cfqq, rq);
 }

-static void cfq_completed_request(request_queue_t *q, struct request *rq)
+static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
         struct cfq_queue *cfqq = RQ_CFQQ(rq);
         struct cfq_data *cfqd = cfqq->cfqd;
@@ -1868,7 +1869,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
         return ELV_MQUEUE_MAY;
 }

-static int cfq_may_queue(request_queue_t *q, int rw)
+static int cfq_may_queue(struct request_queue *q, int rw)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct task_struct *tsk = current;
@@ -1922,7 +1923,7 @@ static void cfq_put_request(struct request *rq)
  * Allocate cfq data structures associated with this request.
  */
 static int
-cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct task_struct *tsk = current;
@@ -1974,7 +1975,7 @@ static void cfq_kick_queue(struct work_struct *work)
 {
         struct cfq_data *cfqd =
                 container_of(work, struct cfq_data, unplug_work);
-        request_queue_t *q = cfqd->queue;
+        struct request_queue *q = cfqd->queue;
         unsigned long flags;

         spin_lock_irqsave(q->queue_lock, flags);
@@ -2072,7 +2073,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
 static void cfq_exit_queue(elevator_t *e)
 {
         struct cfq_data *cfqd = e->elevator_data;
-        request_queue_t *q = cfqd->queue;
+        struct request_queue *q = cfqd->queue;

         cfq_shutdown_timer_wq(cfqd);

@@ -2098,7 +2099,7 @@ static void cfq_exit_queue(elevator_t *e)
         kfree(cfqd);
 }

-static void *cfq_init_queue(request_queue_t *q)
+static void *cfq_init_queue(struct request_queue *q)
 {
         struct cfq_data *cfqd;

diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 87ca02ac84cb..1a511ffaf8a4 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 /*
  * remove rq from rbtree and fifo.
  */
-static void deadline_remove_request(request_queue_t *q, struct request *rq)
+static void deadline_remove_request(struct request_queue *q, struct request *rq)
 {
         struct deadline_data *dd = q->elevator->elevator_data;

@@ -115,7 +115,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
 }

 static int
-deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
         struct deadline_data *dd = q->elevator->elevator_data;
         struct request *__rq;
@@ -144,8 +144,8 @@ out:
         return ret;
 }

-static void deadline_merged_request(request_queue_t *q, struct request *req,
-                                    int type)
+static void deadline_merged_request(struct request_queue *q,
+                                    struct request *req, int type)
 {
         struct deadline_data *dd = q->elevator->elevator_data;

@@ -159,7 +159,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req,
 }

 static void
-deadline_merged_requests(request_queue_t *q, struct request *req,
+deadline_merged_requests(struct request_queue *q, struct request *req,
                          struct request *next)
 {
         /*
@@ -185,7 +185,7 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
 static inline void
 deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
 {
-        request_queue_t *q = rq->q;
+        struct request_queue *q = rq->q;

         deadline_remove_request(q, rq);
         elv_dispatch_add_tail(q, rq);
@@ -236,7 +236,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
-static int deadline_dispatch_requests(request_queue_t *q, int force)
+static int deadline_dispatch_requests(struct request_queue *q, int force)
 {
         struct deadline_data *dd = q->elevator->elevator_data;
         const int reads = !list_empty(&dd->fifo_list[READ]);
@@ -335,7 +335,7 @@ dispatch_request:
         return 1;
 }

-static int deadline_queue_empty(request_queue_t *q)
+static int deadline_queue_empty(struct request_queue *q)
 {
         struct deadline_data *dd = q->elevator->elevator_data;

@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
 /*
  * initialize elevator private data (deadline_data).
  */
-static void *deadline_init_queue(request_queue_t *q)
+static void *deadline_init_queue(struct request_queue *q)
 {
         struct deadline_data *dd;

diff --git a/block/elevator.c b/block/elevator.c
index d265963d1ed3..c6d153de9fd6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -56,7 +56,7 @@ static const int elv_hash_shift = 6;
  */
 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 {
-        request_queue_t *q = rq->q;
+        struct request_queue *q = rq->q;
         elevator_t *e = q->elevator;

         if (e->ops->elevator_allow_merge_fn)
@@ -141,12 +141,13 @@ static struct elevator_type *elevator_get(const char *name)
         return e;
 }

-static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(struct request_queue *q,
+                                 struct elevator_queue *eq)
 {
         return eq->ops->elevator_init_fn(q);
 }

-static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
                             void *data)
 {
         q->elevator = eq;
@@ -172,7 +173,8 @@ __setup("elevator=", elevator_setup);

 static struct kobj_type elv_ktype;

-static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
+static elevator_t *elevator_alloc(struct request_queue *q,
+                                  struct elevator_type *e)
 {
         elevator_t *eq;
         int i;
@@ -212,7 +214,7 @@ static void elevator_release(struct kobject *kobj)
         kfree(e);
 }

-int elevator_init(request_queue_t *q, char *name)
+int elevator_init(struct request_queue *q, char *name)
 {
         struct elevator_type *e = NULL;
         struct elevator_queue *eq;
@@ -264,7 +266,7 @@ void elevator_exit(elevator_t *e)

 EXPORT_SYMBOL(elevator_exit);

-static void elv_activate_rq(request_queue_t *q, struct request *rq)
+static void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
         elevator_t *e = q->elevator;

@@ -272,7 +274,7 @@ static void elv_activate_rq(request_queue_t *q, struct request *rq)
                 e->ops->elevator_activate_req_fn(q, rq);
 }

-static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
+static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
 {
         elevator_t *e = q->elevator;

@@ -285,13 +287,13 @@ static inline void __elv_rqhash_del(struct request *rq)
         hlist_del_init(&rq->hash);
 }

-static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+static void elv_rqhash_del(struct request_queue *q, struct request *rq)
 {
         if (ELV_ON_HASH(rq))
                 __elv_rqhash_del(rq);
 }

-static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 {
         elevator_t *e = q->elevator;

@@ -299,13 +301,13 @@ static void elv_rqhash_add(request_queue_t *q, struct request *rq)
         hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
 }

-static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 {
         __elv_rqhash_del(rq);
         elv_rqhash_add(q, rq);
 }

-static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
         elevator_t *e = q->elevator;
         struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
@@ -391,7 +393,7 @@ EXPORT_SYMBOL(elv_rb_find);
  * entry. rq is sort insted into the dispatch queue. To be used by
  * specific elevators.
  */
-void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 {
         sector_t boundary;
         struct list_head *entry;
@@ -449,7 +451,7 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)

 EXPORT_SYMBOL(elv_dispatch_add_tail);

-int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
+int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
         elevator_t *e = q->elevator;
         struct request *__rq;
@@ -481,7 +483,7 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
         return ELEVATOR_NO_MERGE;
 }

-void elv_merged_request(request_queue_t *q, struct request *rq, int type)
+void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
         elevator_t *e = q->elevator;

@@ -494,7 +496,7 @@ void elv_merged_request(request_queue_t *q, struct request *rq, int type)
         q->last_merge = rq;
 }

-void elv_merge_requests(request_queue_t *q, struct request *rq,
+void elv_merge_requests(struct request_queue *q, struct request *rq,
                         struct request *next)
 {
         elevator_t *e = q->elevator;
@@ -509,7 +511,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
         q->last_merge = rq;
 }

-void elv_requeue_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(struct request_queue *q, struct request *rq)
 {
         /*
          * it already went through dequeue, we need to decrement the
@@ -526,7 +528,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
         elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }

-static void elv_drain_elevator(request_queue_t *q)
+static void elv_drain_elevator(struct request_queue *q)
 {
         static int printed;
         while (q->elevator->ops->elevator_dispatch_fn(q, 1))
@@ -540,7 +542,7 @@ static void elv_drain_elevator(request_queue_t *q)
         }
 }

-void elv_insert(request_queue_t *q, struct request *rq, int where)
+void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
         struct list_head *pos;
         unsigned ordseq;
@@ -638,7 +640,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
         }
 }

-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+void __elv_add_request(struct request_queue *q, struct request *rq, int where,
                        int plug)
 {
         if (q->ordcolor)
@@ -676,7 +678,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,

 EXPORT_SYMBOL(__elv_add_request);

-void elv_add_request(request_queue_t *q, struct request *rq, int where,
+void elv_add_request(struct request_queue *q, struct request *rq, int where,
                      int plug)
 {
         unsigned long flags;
@@ -688,7 +690,7 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,

 EXPORT_SYMBOL(elv_add_request);

-static inline struct request *__elv_next_request(request_queue_t *q)
+static inline struct request *__elv_next_request(struct request_queue *q)
 {
         struct request *rq;

@@ -704,7 +706,7 @@ static inline struct request *__elv_next_request(request_queue_t *q)
         }
 }

-struct request *elv_next_request(request_queue_t *q)
+struct request *elv_next_request(struct request_queue *q)
 {
         struct request *rq;
         int ret;
@@ -770,7 +772,7 @@ struct request *elv_next_request(request_queue_t *q)

 EXPORT_SYMBOL(elv_next_request);

-void elv_dequeue_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
 {
         BUG_ON(list_empty(&rq->queuelist));
         BUG_ON(ELV_ON_HASH(rq));
@@ -788,7 +790,7 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)

 EXPORT_SYMBOL(elv_dequeue_request);

-int elv_queue_empty(request_queue_t *q)
+int elv_queue_empty(struct request_queue *q)
 {
         elevator_t *e = q->elevator;

@@ -803,7 +805,7 @@ int elv_queue_empty(request_queue_t *q)

 EXPORT_SYMBOL(elv_queue_empty);

-struct request *elv_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
         elevator_t *e = q->elevator;

@@ -812,7 +814,7 @@ struct request *elv_latter_request(request_queue_t *q, struct request *rq)
         return NULL;
 }

-struct request *elv_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_former_request(struct request_queue *q, struct request *rq)
 {
         elevator_t *e = q->elevator;

@@ -821,7 +823,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
         return NULL;
 }

-int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
         elevator_t *e = q->elevator;

@@ -832,7 +834,7 @@ int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
         return 0;
 }

-void elv_put_request(request_queue_t *q, struct request *rq)
+void elv_put_request(struct request_queue *q, struct request *rq)
 {
         elevator_t *e = q->elevator;

@@ -840,7 +842,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
                 e->ops->elevator_put_req_fn(rq);
 }

-int elv_may_queue(request_queue_t *q, int rw)
+int elv_may_queue(struct request_queue *q, int rw)
 {
         elevator_t *e = q->elevator;

@@ -850,7 +852,7 @@ int elv_may_queue(request_queue_t *q, int rw)
         return ELV_MQUEUE_MAY;
 }

-void elv_completed_request(request_queue_t *q, struct request *rq)
+void elv_completed_request(struct request_queue *q, struct request *rq)
 {
         elevator_t *e = q->elevator;

@@ -1006,7 +1008,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  * need for the new one. this way we have a chance of going back to the old
  * one, if the new one fails init for some reason.
  */
-static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
         elevator_t *old_elevator, *e;
         void *data;
@@ -1078,7 +1080,8 @@ fail_register:
         return 0;
 }

-ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+                          size_t count)
 {
         char elevator_name[ELV_NAME_MAX];
         size_t len;
@@ -1107,7 +1110,7 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
         return count;
 }

-ssize_t elv_iosched_show(request_queue_t *q, char *name)
+ssize_t elv_iosched_show(struct request_queue *q, char *name)
 {
         elevator_t *e = q->elevator;
         struct elevator_type *elv = e->elevator_type;
@@ -1127,7 +1130,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
         return len;
 }

-struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_former_request(struct request_queue *q,
+                                      struct request *rq)
 {
         struct rb_node *rbprev = rb_prev(&rq->rb_node);

@@ -1139,7 +1143,8 @@ struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)

 EXPORT_SYMBOL(elv_rb_former_request);

-struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_latter_request(struct request_queue *q,
+                                      struct request *rq)
 {
         struct rb_node *rbnext = rb_next(&rq->rb_node);

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 66056ca5e631..8c2caff87cc3 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -40,7 +40,7 @@ static void blk_unplug_work(struct work_struct *work); | |||
40 | static void blk_unplug_timeout(unsigned long data); | 40 | static void blk_unplug_timeout(unsigned long data); |
41 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); | 41 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); |
42 | static void init_request_from_bio(struct request *req, struct bio *bio); | 42 | static void init_request_from_bio(struct request *req, struct bio *bio); |
43 | static int __make_request(request_queue_t *q, struct bio *bio); | 43 | static int __make_request(struct request_queue *q, struct bio *bio); |
44 | static struct io_context *current_io_context(gfp_t gfp_flags, int node); | 44 | static struct io_context *current_io_context(gfp_t gfp_flags, int node); |
45 | 45 | ||
46 | /* | 46 | /* |
@@ -121,7 +121,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q) | |||
121 | struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) | 121 | struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) |
122 | { | 122 | { |
123 | struct backing_dev_info *ret = NULL; | 123 | struct backing_dev_info *ret = NULL; |
124 | request_queue_t *q = bdev_get_queue(bdev); | 124 | struct request_queue *q = bdev_get_queue(bdev); |
125 | 125 | ||
126 | if (q) | 126 | if (q) |
127 | ret = &q->backing_dev_info; | 127 | ret = &q->backing_dev_info; |
@@ -140,7 +140,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info); | |||
140 | * cdb from the request data for instance. | 140 | * cdb from the request data for instance. |
141 | * | 141 | * |
142 | */ | 142 | */ |
143 | void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn) | 143 | void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) |
144 | { | 144 | { |
145 | q->prep_rq_fn = pfn; | 145 | q->prep_rq_fn = pfn; |
146 | } | 146 | } |
@@ -163,14 +163,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq); | |||
163 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are | 163 | * no merge_bvec_fn is defined for a queue, and only the fixed limits are |
164 | * honored. | 164 | * honored. |
165 | */ | 165 | */ |
166 | void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn) | 166 | void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) |
167 | { | 167 | { |
168 | q->merge_bvec_fn = mbfn; | 168 | q->merge_bvec_fn = mbfn; |
169 | } | 169 | } |
170 | 170 | ||
171 | EXPORT_SYMBOL(blk_queue_merge_bvec); | 171 | EXPORT_SYMBOL(blk_queue_merge_bvec); |
172 | 172 | ||
173 | void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn) | 173 | void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) |
174 | { | 174 | { |
175 | q->softirq_done_fn = fn; | 175 | q->softirq_done_fn = fn; |
176 | } | 176 | } |
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done); | |||
199 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling | 199 | * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling |
200 | * blk_queue_bounce() to create a buffer in normal memory. | 200 | * blk_queue_bounce() to create a buffer in normal memory. |
201 | **/ | 201 | **/ |
202 | void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) | 202 | void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) |
203 | { | 203 | { |
204 | /* | 204 | /* |
205 | * set defaults | 205 | * set defaults |
@@ -235,7 +235,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) | |||
235 | 235 | ||
236 | EXPORT_SYMBOL(blk_queue_make_request); | 236 | EXPORT_SYMBOL(blk_queue_make_request); |
237 | 237 | ||
238 | static void rq_init(request_queue_t *q, struct request *rq) | 238 | static void rq_init(struct request_queue *q, struct request *rq) |
239 | { | 239 | { |
240 | INIT_LIST_HEAD(&rq->queuelist); | 240 | INIT_LIST_HEAD(&rq->queuelist); |
241 | INIT_LIST_HEAD(&rq->donelist); | 241 | INIT_LIST_HEAD(&rq->donelist); |
@@ -272,7 +272,7 @@ static void rq_init(request_queue_t *q, struct request *rq) | |||
272 | * feature should call this function and indicate so. | 272 | * feature should call this function and indicate so. |
273 | * | 273 | * |
274 | **/ | 274 | **/ |
275 | int blk_queue_ordered(request_queue_t *q, unsigned ordered, | 275 | int blk_queue_ordered(struct request_queue *q, unsigned ordered, |
276 | prepare_flush_fn *prepare_flush_fn) | 276 | prepare_flush_fn *prepare_flush_fn) |
277 | { | 277 | { |
278 | if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && | 278 | if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && |
@@ -311,7 +311,7 @@ EXPORT_SYMBOL(blk_queue_ordered); | |||
311 | * to the block layer by defining it through this call. | 311 | * to the block layer by defining it through this call. |
312 | * | 312 | * |
313 | **/ | 313 | **/ |
314 | void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff) | 314 | void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff) |
315 | { | 315 | { |
316 | q->issue_flush_fn = iff; | 316 | q->issue_flush_fn = iff; |
317 | } | 317 | } |
@@ -321,7 +321,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn); | |||
321 | /* | 321 | /* |
322 | * Cache flushing for ordered writes handling | 322 | * Cache flushing for ordered writes handling |
323 | */ | 323 | */ |
324 | inline unsigned blk_ordered_cur_seq(request_queue_t *q) | 324 | inline unsigned blk_ordered_cur_seq(struct request_queue *q) |
325 | { | 325 | { |
326 | if (!q->ordseq) | 326 | if (!q->ordseq) |
327 | return 0; | 327 | return 0; |
@@ -330,7 +330,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q) | |||
330 | 330 | ||
331 | unsigned blk_ordered_req_seq(struct request *rq) | 331 | unsigned blk_ordered_req_seq(struct request *rq) |
332 | { | 332 | { |
333 | request_queue_t *q = rq->q; | 333 | struct request_queue *q = rq->q; |
334 | 334 | ||
335 | BUG_ON(q->ordseq == 0); | 335 | BUG_ON(q->ordseq == 0); |
336 | 336 | ||
@@ -357,7 +357,7 @@ unsigned blk_ordered_req_seq(struct request *rq) | |||
357 | return QUEUE_ORDSEQ_DONE; | 357 | return QUEUE_ORDSEQ_DONE; |
358 | } | 358 | } |
359 | 359 | ||
360 | void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error) | 360 | void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) |
361 | { | 361 | { |
362 | struct request *rq; | 362 | struct request *rq; |
363 | int uptodate; | 363 | int uptodate; |
@@ -401,7 +401,7 @@ static void post_flush_end_io(struct request *rq, int error) | |||
401 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); | 401 | blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); |
402 | } | 402 | } |
403 | 403 | ||
404 | static void queue_flush(request_queue_t *q, unsigned which) | 404 | static void queue_flush(struct request_queue *q, unsigned which) |
405 | { | 405 | { |
406 | struct request *rq; | 406 | struct request *rq; |
407 | rq_end_io_fn *end_io; | 407 | rq_end_io_fn *end_io; |
@@ -425,7 +425,7 @@ static void queue_flush(request_queue_t *q, unsigned which) | |||
425 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | 425 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); |
426 | } | 426 | } |
427 | 427 | ||
428 | static inline struct request *start_ordered(request_queue_t *q, | 428 | static inline struct request *start_ordered(struct request_queue *q, |
429 | struct request *rq) | 429 | struct request *rq) |
430 | { | 430 | { |
431 | q->bi_size = 0; | 431 | q->bi_size = 0; |
@@ -476,7 +476,7 @@ static inline struct request *start_ordered(request_queue_t *q, | |||
476 | return rq; | 476 | return rq; |
477 | } | 477 | } |
478 | 478 | ||
479 | int blk_do_ordered(request_queue_t *q, struct request **rqp) | 479 | int blk_do_ordered(struct request_queue *q, struct request **rqp) |
480 | { | 480 | { |
481 | struct request *rq = *rqp; | 481 | struct request *rq = *rqp; |
482 | int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); | 482 | int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); |
@@ -527,7 +527,7 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp) | |||
527 | 527 | ||
528 | static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) | 528 | static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) |
529 | { | 529 | { |
530 | request_queue_t *q = bio->bi_private; | 530 | struct request_queue *q = bio->bi_private; |
531 | 531 | ||
532 | /* | 532 | /* |
533 | * This is dry run, restore bio_sector and size. We'll finish | 533 | * This is dry run, restore bio_sector and size. We'll finish |
@@ -551,7 +551,7 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) | |||
551 | static int ordered_bio_endio(struct request *rq, struct bio *bio, | 551 | static int ordered_bio_endio(struct request *rq, struct bio *bio, |
552 | unsigned int nbytes, int error) | 552 | unsigned int nbytes, int error) |
553 | { | 553 | { |
554 | request_queue_t *q = rq->q; | 554 | struct request_queue *q = rq->q; |
555 | bio_end_io_t *endio; | 555 | bio_end_io_t *endio; |
556 | void *private; | 556 | void *private; |
557 | 557 | ||
@@ -588,7 +588,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio, | |||
588 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce | 588 | * blk_queue_bounce_limit to have lower memory pages allocated as bounce |
589 | * buffers for doing I/O to pages residing above @page. | 589 | * buffers for doing I/O to pages residing above @page. |
590 | **/ | 590 | **/ |
591 | void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) | 591 | void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) |
592 | { | 592 | { |
593 | unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; | 593 | unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; |
594 | int dma = 0; | 594 | int dma = 0; |
@@ -624,7 +624,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); | |||
624 | * Enables a low level driver to set an upper limit on the size of | 624 | * Enables a low level driver to set an upper limit on the size of |
625 | * received requests. | 625 | * received requests. |
626 | **/ | 626 | **/ |
627 | void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors) | 627 | void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) |
628 | { | 628 | { |
629 | if ((max_sectors << 9) < PAGE_CACHE_SIZE) { | 629 | if ((max_sectors << 9) < PAGE_CACHE_SIZE) { |
630 | max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | 630 | max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); |
@@ -651,7 +651,8 @@ EXPORT_SYMBOL(blk_queue_max_sectors); | |||
651 | * physical data segments in a request. This would be the largest sized | 651 | * physical data segments in a request. This would be the largest sized |
652 | * scatter list the driver could handle. | 652 | * scatter list the driver could handle. |
653 | **/ | 653 | **/ |
654 | void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments) | 654 | void blk_queue_max_phys_segments(struct request_queue *q, |
655 | unsigned short max_segments) | ||
655 | { | 656 | { |
656 | if (!max_segments) { | 657 | if (!max_segments) { |
657 | max_segments = 1; | 658 | max_segments = 1; |
@@ -674,7 +675,8 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments); | |||
674 | * address/length pairs the host adapter can actually give as once | 675 | * address/length pairs the host adapter can actually give as once |
675 | * to the device. | 676 | * to the device. |
676 | **/ | 677 | **/ |
677 | void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments) | 678 | void blk_queue_max_hw_segments(struct request_queue *q, |
679 | unsigned short max_segments) | ||
678 | { | 680 | { |
679 | if (!max_segments) { | 681 | if (!max_segments) { |
680 | max_segments = 1; | 682 | max_segments = 1; |
@@ -695,7 +697,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments); | |||
695 | * Enables a low level driver to set an upper limit on the size of a | 697 | * Enables a low level driver to set an upper limit on the size of a |
696 | * coalesced segment | 698 | * coalesced segment |
697 | **/ | 699 | **/ |
698 | void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size) | 700 | void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) |
699 | { | 701 | { |
700 | if (max_size < PAGE_CACHE_SIZE) { | 702 | if (max_size < PAGE_CACHE_SIZE) { |
701 | max_size = PAGE_CACHE_SIZE; | 703 | max_size = PAGE_CACHE_SIZE; |
@@ -718,7 +720,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); | |||
718 | * even internal read-modify-write operations). Usually the default | 720 | * even internal read-modify-write operations). Usually the default |
719 | * of 512 covers most hardware. | 721 | * of 512 covers most hardware. |
720 | **/ | 722 | **/ |
721 | void blk_queue_hardsect_size(request_queue_t *q, unsigned short size) | 723 | void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) |
722 | { | 724 | { |
723 | q->hardsect_size = size; | 725 | q->hardsect_size = size; |
724 | } | 726 | } |
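
Taken together, the setters above form a driver's queue-limit configuration. A minimal sketch, with illustrative numbers rather than values from any real device:

    #include <linux/blkdev.h>

    static void mydev_set_limits(struct request_queue *q)
    {
            blk_queue_max_sectors(q, 256);          /* at most 128KB per request */
            blk_queue_max_phys_segments(q, 32);     /* scatterlist entries we can walk */
            blk_queue_max_hw_segments(q, 32);       /* s/g entries the adapter takes at once */
            blk_queue_max_segment_size(q, 64 * 1024);
            blk_queue_hardsect_size(q, 512);        /* device sector size */
    }
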
@@ -735,7 +737,7 @@ EXPORT_SYMBOL(blk_queue_hardsect_size); | |||
735 | * @t: the stacking driver (top) | 737 | * @t: the stacking driver (top) |
736 | * @b: the underlying device (bottom) | 738 | * @b: the underlying device (bottom) |
737 | **/ | 739 | **/ |
738 | void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) | 740 | void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) |
739 | { | 741 | { |
740 | /* zero is "infinity" */ | 742 | /* zero is "infinity" */ |
741 | t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); | 743 | t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); |
@@ -756,7 +758,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits); | |||
756 | * @q: the request queue for the device | 758 | * @q: the request queue for the device |
757 | * @mask: the memory boundary mask | 759 | * @mask: the memory boundary mask |
758 | **/ | 760 | **/ |
759 | void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask) | 761 | void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) |
760 | { | 762 | { |
761 | if (mask < PAGE_CACHE_SIZE - 1) { | 763 | if (mask < PAGE_CACHE_SIZE - 1) { |
762 | mask = PAGE_CACHE_SIZE - 1; | 764 | mask = PAGE_CACHE_SIZE - 1; |
@@ -778,7 +780,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary); | |||
778 | * this is used when building direct io requests for the queue. | 780 | * this is used when building direct io requests for the queue. |
779 | * | 781 | * |
780 | **/ | 782 | **/ |
781 | void blk_queue_dma_alignment(request_queue_t *q, int mask) | 783 | void blk_queue_dma_alignment(struct request_queue *q, int mask) |
782 | { | 784 | { |
783 | q->dma_alignment = mask; | 785 | q->dma_alignment = mask; |
784 | } | 786 | } |
@@ -796,7 +798,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment); | |||
796 | * | 798 | * |
797 | * no locks need be held. | 799 | * no locks need be held. |
798 | **/ | 800 | **/ |
799 | struct request *blk_queue_find_tag(request_queue_t *q, int tag) | 801 | struct request *blk_queue_find_tag(struct request_queue *q, int tag) |
800 | { | 802 | { |
801 | return blk_map_queue_find_tag(q->queue_tags, tag); | 803 | return blk_map_queue_find_tag(q->queue_tags, tag); |
802 | } | 804 | } |
@@ -840,7 +842,7 @@ static int __blk_free_tags(struct blk_queue_tag *bqt) | |||
840 | * blk_cleanup_queue() will take care of calling this function, if tagging | 842 | * blk_cleanup_queue() will take care of calling this function, if tagging |
841 | * has been used. So there's no need to call this directly. | 843 | * has been used. So there's no need to call this directly. |
842 | **/ | 844 | **/ |
843 | static void __blk_queue_free_tags(request_queue_t *q) | 845 | static void __blk_queue_free_tags(struct request_queue *q) |
844 | { | 846 | { |
845 | struct blk_queue_tag *bqt = q->queue_tags; | 847 | struct blk_queue_tag *bqt = q->queue_tags; |
846 | 848 | ||
@@ -877,7 +879,7 @@ EXPORT_SYMBOL(blk_free_tags); | |||
877 | * This is used to disable tagged queuing on a device, yet leave | 879 | * This is used to disable tagged queuing on a device, yet leave |
878 | * the queue in function. | 880 | * the queue in function. |
879 | **/ | 881 | **/ |
880 | void blk_queue_free_tags(request_queue_t *q) | 882 | void blk_queue_free_tags(struct request_queue *q) |
881 | { | 883 | { |
882 | clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); | 884 | clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); |
883 | } | 885 | } |
@@ -885,7 +887,7 @@ void blk_queue_free_tags(request_queue_t *q) | |||
885 | EXPORT_SYMBOL(blk_queue_free_tags); | 887 | EXPORT_SYMBOL(blk_queue_free_tags); |
886 | 888 | ||
887 | static int | 889 | static int |
888 | init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) | 890 | init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) |
889 | { | 891 | { |
890 | struct request **tag_index; | 892 | struct request **tag_index; |
891 | unsigned long *tag_map; | 893 | unsigned long *tag_map; |
@@ -955,7 +957,7 @@ EXPORT_SYMBOL(blk_init_tags); | |||
955 | * @depth: the maximum queue depth supported | 957 | * @depth: the maximum queue depth supported |
956 | * @tags: the tag to use | 958 | * @tags: the tag to use |
957 | **/ | 959 | **/ |
958 | int blk_queue_init_tags(request_queue_t *q, int depth, | 960 | int blk_queue_init_tags(struct request_queue *q, int depth, |
959 | struct blk_queue_tag *tags) | 961 | struct blk_queue_tag *tags) |
960 | { | 962 | { |
961 | int rc; | 963 | int rc; |
@@ -996,7 +998,7 @@ EXPORT_SYMBOL(blk_queue_init_tags); | |||
996 | * Notes: | 998 | * Notes: |
997 | * Must be called with the queue lock held. | 999 | * Must be called with the queue lock held. |
998 | **/ | 1000 | **/ |
999 | int blk_queue_resize_tags(request_queue_t *q, int new_depth) | 1001 | int blk_queue_resize_tags(struct request_queue *q, int new_depth) |
1000 | { | 1002 | { |
1001 | struct blk_queue_tag *bqt = q->queue_tags; | 1003 | struct blk_queue_tag *bqt = q->queue_tags; |
1002 | struct request **tag_index; | 1004 | struct request **tag_index; |
@@ -1059,7 +1061,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags); | |||
1059 | * Notes: | 1061 | * Notes: |
1060 | * queue lock must be held. | 1062 | * queue lock must be held. |
1061 | **/ | 1063 | **/ |
1062 | void blk_queue_end_tag(request_queue_t *q, struct request *rq) | 1064 | void blk_queue_end_tag(struct request_queue *q, struct request *rq) |
1063 | { | 1065 | { |
1064 | struct blk_queue_tag *bqt = q->queue_tags; | 1066 | struct blk_queue_tag *bqt = q->queue_tags; |
1065 | int tag = rq->tag; | 1067 | int tag = rq->tag; |
@@ -1111,7 +1113,7 @@ EXPORT_SYMBOL(blk_queue_end_tag); | |||
1111 | * Notes: | 1113 | * Notes: |
1112 | * queue lock must be held. | 1114 | * queue lock must be held. |
1113 | **/ | 1115 | **/ |
1114 | int blk_queue_start_tag(request_queue_t *q, struct request *rq) | 1116 | int blk_queue_start_tag(struct request_queue *q, struct request *rq) |
1115 | { | 1117 | { |
1116 | struct blk_queue_tag *bqt = q->queue_tags; | 1118 | struct blk_queue_tag *bqt = q->queue_tags; |
1117 | int tag; | 1119 | int tag; |
@@ -1158,7 +1160,7 @@ EXPORT_SYMBOL(blk_queue_start_tag); | |||
1158 | * Notes: | 1160 | * Notes: |
1159 | * queue lock must be held. | 1161 | * queue lock must be held. |
1160 | **/ | 1162 | **/ |
1161 | void blk_queue_invalidate_tags(request_queue_t *q) | 1163 | void blk_queue_invalidate_tags(struct request_queue *q) |
1162 | { | 1164 | { |
1163 | struct blk_queue_tag *bqt = q->queue_tags; | 1165 | struct blk_queue_tag *bqt = q->queue_tags; |
1164 | struct list_head *tmp, *n; | 1166 | struct list_head *tmp, *n; |
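
A sketch of the tagged-queuing lifecycle these functions implement. The depth of 64 and the surrounding driver are hypothetical, and both the start and end calls run under the queue lock, as the notes above require:

    /* once, at init time: allocate a tag map of depth 64 */
    if (blk_queue_init_tags(q, 64, NULL))
            return -ENOMEM;

    /* per request, from the request_fn (queue lock held);
     * a nonzero return means the tag map is exhausted */
    if (blk_queue_start_tag(q, rq))
            return;                 /* try again later */
    /* rq->tag now identifies the command to the hardware */

    /* on completion (queue lock held again): */
    blk_queue_end_tag(q, rq);
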
@@ -1205,7 +1207,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg) | |||
1205 | 1207 | ||
1206 | EXPORT_SYMBOL(blk_dump_rq_flags); | 1208 | EXPORT_SYMBOL(blk_dump_rq_flags); |
1207 | 1209 | ||
1208 | void blk_recount_segments(request_queue_t *q, struct bio *bio) | 1210 | void blk_recount_segments(struct request_queue *q, struct bio *bio) |
1209 | { | 1211 | { |
1210 | struct bio_vec *bv, *bvprv = NULL; | 1212 | struct bio_vec *bv, *bvprv = NULL; |
1211 | int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; | 1213 | int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; |
@@ -1267,7 +1269,7 @@ new_hw_segment: | |||
1267 | } | 1269 | } |
1268 | EXPORT_SYMBOL(blk_recount_segments); | 1270 | EXPORT_SYMBOL(blk_recount_segments); |
1269 | 1271 | ||
1270 | static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, | 1272 | static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, |
1271 | struct bio *nxt) | 1273 | struct bio *nxt) |
1272 | { | 1274 | { |
1273 | if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) | 1275 | if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) |
@@ -1288,7 +1290,7 @@ static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, | |||
1288 | return 0; | 1290 | return 0; |
1289 | } | 1291 | } |
1290 | 1292 | ||
1291 | static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, | 1293 | static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio, |
1292 | struct bio *nxt) | 1294 | struct bio *nxt) |
1293 | { | 1295 | { |
1294 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | 1296 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) |
@@ -1308,7 +1310,8 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, | |||
1308 | * map a request to a scatterlist, return number of sg entries set up. Caller | 1310 | * map a request to a scatterlist, return number of sg entries set up. Caller |
1309 | * must make sure sg can hold rq->nr_phys_segments entries | 1311 | * must make sure sg can hold rq->nr_phys_segments entries |
1310 | */ | 1312 | */ |
1311 | int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg) | 1313 | int blk_rq_map_sg(struct request_queue *q, struct request *rq, |
1314 | struct scatterlist *sg) | ||
1312 | { | 1315 | { |
1313 | struct bio_vec *bvec, *bvprv; | 1316 | struct bio_vec *bvec, *bvprv; |
1314 | struct bio *bio; | 1317 | struct bio *bio; |
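
A sketch of the usual call site, inside a driver's request handling; MYDEV_MAX_SEGS is a hypothetical bound that must be at least rq->nr_phys_segments, per the comment above:

    struct scatterlist sg[MYDEV_MAX_SEGS];
    int nseg;

    /* fill sg[] from the request's bios; returns entries used */
    nseg = blk_rq_map_sg(q, rq, sg);
    /* ... hand the nseg entries to the DMA engine ... */
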
@@ -1361,7 +1364,7 @@ EXPORT_SYMBOL(blk_rq_map_sg); | |||
1361 | * specific ones if so desired | 1364 | * specific ones if so desired |
1362 | */ | 1365 | */ |
1363 | 1366 | ||
1364 | static inline int ll_new_mergeable(request_queue_t *q, | 1367 | static inline int ll_new_mergeable(struct request_queue *q, |
1365 | struct request *req, | 1368 | struct request *req, |
1366 | struct bio *bio) | 1369 | struct bio *bio) |
1367 | { | 1370 | { |
@@ -1382,7 +1385,7 @@ static inline int ll_new_mergeable(request_queue_t *q, | |||
1382 | return 1; | 1385 | return 1; |
1383 | } | 1386 | } |
1384 | 1387 | ||
1385 | static inline int ll_new_hw_segment(request_queue_t *q, | 1388 | static inline int ll_new_hw_segment(struct request_queue *q, |
1386 | struct request *req, | 1389 | struct request *req, |
1387 | struct bio *bio) | 1390 | struct bio *bio) |
1388 | { | 1391 | { |
@@ -1406,7 +1409,7 @@ static inline int ll_new_hw_segment(request_queue_t *q, | |||
1406 | return 1; | 1409 | return 1; |
1407 | } | 1410 | } |
1408 | 1411 | ||
1409 | int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) | 1412 | int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) |
1410 | { | 1413 | { |
1411 | unsigned short max_sectors; | 1414 | unsigned short max_sectors; |
1412 | int len; | 1415 | int len; |
@@ -1444,7 +1447,7 @@ int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) | |||
1444 | } | 1447 | } |
1445 | EXPORT_SYMBOL(ll_back_merge_fn); | 1448 | EXPORT_SYMBOL(ll_back_merge_fn); |
1446 | 1449 | ||
1447 | static int ll_front_merge_fn(request_queue_t *q, struct request *req, | 1450 | static int ll_front_merge_fn(struct request_queue *q, struct request *req, |
1448 | struct bio *bio) | 1451 | struct bio *bio) |
1449 | { | 1452 | { |
1450 | unsigned short max_sectors; | 1453 | unsigned short max_sectors; |
@@ -1483,7 +1486,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, | |||
1483 | return ll_new_hw_segment(q, req, bio); | 1486 | return ll_new_hw_segment(q, req, bio); |
1484 | } | 1487 | } |
1485 | 1488 | ||
1486 | static int ll_merge_requests_fn(request_queue_t *q, struct request *req, | 1489 | static int ll_merge_requests_fn(struct request_queue *q, struct request *req, |
1487 | struct request *next) | 1490 | struct request *next) |
1488 | { | 1491 | { |
1489 | int total_phys_segments; | 1492 | int total_phys_segments; |
@@ -1539,7 +1542,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req, | |||
1539 | * This is called with interrupts off and no requests on the queue and | 1542 | * This is called with interrupts off and no requests on the queue and |
1540 | * with the queue lock held. | 1543 | * with the queue lock held. |
1541 | */ | 1544 | */ |
1542 | void blk_plug_device(request_queue_t *q) | 1545 | void blk_plug_device(struct request_queue *q) |
1543 | { | 1546 | { |
1544 | WARN_ON(!irqs_disabled()); | 1547 | WARN_ON(!irqs_disabled()); |
1545 | 1548 | ||
@@ -1562,7 +1565,7 @@ EXPORT_SYMBOL(blk_plug_device); | |||
1562 | * remove the queue from the plugged list, if present. called with | 1565 | * remove the queue from the plugged list, if present. called with |
1563 | * queue lock held and interrupts disabled. | 1566 | * queue lock held and interrupts disabled. |
1564 | */ | 1567 | */ |
1565 | int blk_remove_plug(request_queue_t *q) | 1568 | int blk_remove_plug(struct request_queue *q) |
1566 | { | 1569 | { |
1567 | WARN_ON(!irqs_disabled()); | 1570 | WARN_ON(!irqs_disabled()); |
1568 | 1571 | ||
@@ -1578,7 +1581,7 @@ EXPORT_SYMBOL(blk_remove_plug); | |||
1578 | /* | 1581 | /* |
1579 | * remove the plug and let it rip.. | 1582 | * remove the plug and let it rip.. |
1580 | */ | 1583 | */ |
1581 | void __generic_unplug_device(request_queue_t *q) | 1584 | void __generic_unplug_device(struct request_queue *q) |
1582 | { | 1585 | { |
1583 | if (unlikely(blk_queue_stopped(q))) | 1586 | if (unlikely(blk_queue_stopped(q))) |
1584 | return; | 1587 | return; |
@@ -1592,7 +1595,7 @@ EXPORT_SYMBOL(__generic_unplug_device); | |||
1592 | 1595 | ||
1593 | /** | 1596 | /** |
1594 | * generic_unplug_device - fire a request queue | 1597 | * generic_unplug_device - fire a request queue |
1595 | * @q: The &request_queue_t in question | 1598 | * @q: The &struct request_queue in question |
1596 | * | 1599 | * |
1597 | * Description: | 1600 | * Description: |
1598 | * Linux uses plugging to build bigger request queues before letting | 1601 | * Linux uses plugging to build bigger request queues before letting |
@@ -1601,7 +1604,7 @@ EXPORT_SYMBOL(__generic_unplug_device); | |||
1601 | * gets unplugged, the request_fn defined for the queue is invoked and | 1604 | * gets unplugged, the request_fn defined for the queue is invoked and |
1602 | * transfers started. | 1605 | * transfers started. |
1603 | **/ | 1606 | **/ |
1604 | void generic_unplug_device(request_queue_t *q) | 1607 | void generic_unplug_device(struct request_queue *q) |
1605 | { | 1608 | { |
1606 | spin_lock_irq(q->queue_lock); | 1609 | spin_lock_irq(q->queue_lock); |
1607 | __generic_unplug_device(q); | 1610 | __generic_unplug_device(q); |
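
For reference, a sketch of the two ways a driver can force a plugged queue to dispatch, matching the locking shown above:

    /* with q->queue_lock already held: */
    __generic_unplug_device(q);

    /* without the lock (takes and releases it internally): */
    generic_unplug_device(q);
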
@@ -1612,7 +1615,7 @@ EXPORT_SYMBOL(generic_unplug_device); | |||
1612 | static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | 1615 | static void blk_backing_dev_unplug(struct backing_dev_info *bdi, |
1613 | struct page *page) | 1616 | struct page *page) |
1614 | { | 1617 | { |
1615 | request_queue_t *q = bdi->unplug_io_data; | 1618 | struct request_queue *q = bdi->unplug_io_data; |
1616 | 1619 | ||
1617 | /* | 1620 | /* |
1618 | * devices don't necessarily have an ->unplug_fn defined | 1621 | * devices don't necessarily have an ->unplug_fn defined |
@@ -1627,7 +1630,8 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | |||
1627 | 1630 | ||
1628 | static void blk_unplug_work(struct work_struct *work) | 1631 | static void blk_unplug_work(struct work_struct *work) |
1629 | { | 1632 | { |
1630 | request_queue_t *q = container_of(work, request_queue_t, unplug_work); | 1633 | struct request_queue *q = |
1634 | container_of(work, struct request_queue, unplug_work); | ||
1631 | 1635 | ||
1632 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, | 1636 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, |
1633 | q->rq.count[READ] + q->rq.count[WRITE]); | 1637 | q->rq.count[READ] + q->rq.count[WRITE]); |
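
The container_of() idiom used in these conversions recovers the enclosing queue from a pointer to one of its embedded members; a minimal illustration of what the expression above computes:

    /* given work == &q->unplug_work, container_of() subtracts the
     * member's offset within struct request_queue to recover q */
    struct request_queue *q =
            container_of(work, struct request_queue, unplug_work);
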
@@ -1637,7 +1641,7 @@ static void blk_unplug_work(struct work_struct *work) | |||
1637 | 1641 | ||
1638 | static void blk_unplug_timeout(unsigned long data) | 1642 | static void blk_unplug_timeout(unsigned long data) |
1639 | { | 1643 | { |
1640 | request_queue_t *q = (request_queue_t *)data; | 1644 | struct request_queue *q = (struct request_queue *)data; |
1641 | 1645 | ||
1642 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, | 1646 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, |
1643 | q->rq.count[READ] + q->rq.count[WRITE]); | 1647 | q->rq.count[READ] + q->rq.count[WRITE]); |
@@ -1647,14 +1651,14 @@ static void blk_unplug_timeout(unsigned long data) | |||
1647 | 1651 | ||
1648 | /** | 1652 | /** |
1649 | * blk_start_queue - restart a previously stopped queue | 1653 | * blk_start_queue - restart a previously stopped queue |
1650 | * @q: The &request_queue_t in question | 1654 | * @q: The &struct request_queue in question |
1651 | * | 1655 | * |
1652 | * Description: | 1656 | * Description: |
1653 | * blk_start_queue() will clear the stop flag on the queue, and call | 1657 | * blk_start_queue() will clear the stop flag on the queue, and call |
1654 | * the request_fn for the queue if it was in a stopped state when | 1658 | * the request_fn for the queue if it was in a stopped state when |
1655 | * entered. Also see blk_stop_queue(). Queue lock must be held. | 1659 | * entered. Also see blk_stop_queue(). Queue lock must be held. |
1656 | **/ | 1660 | **/ |
1657 | void blk_start_queue(request_queue_t *q) | 1661 | void blk_start_queue(struct request_queue *q) |
1658 | { | 1662 | { |
1659 | WARN_ON(!irqs_disabled()); | 1663 | WARN_ON(!irqs_disabled()); |
1660 | 1664 | ||
@@ -1677,7 +1681,7 @@ EXPORT_SYMBOL(blk_start_queue); | |||
1677 | 1681 | ||
1678 | /** | 1682 | /** |
1679 | * blk_stop_queue - stop a queue | 1683 | * blk_stop_queue - stop a queue |
1680 | * @q: The &request_queue_t in question | 1684 | * @q: The &struct request_queue in question |
1681 | * | 1685 | * |
1682 | * Description: | 1686 | * Description: |
1683 | * The Linux block layer assumes that a block driver will consume all | 1687 | * The Linux block layer assumes that a block driver will consume all |
@@ -1689,7 +1693,7 @@ EXPORT_SYMBOL(blk_start_queue); | |||
1689 | * the driver has signalled it's ready to go again. This happens by calling | 1693 | * the driver has signalled it's ready to go again. This happens by calling |
1690 | * blk_start_queue() to restart queue operations. Queue lock must be held. | 1694 | * blk_start_queue() to restart queue operations. Queue lock must be held. |
1691 | **/ | 1695 | **/ |
1692 | void blk_stop_queue(request_queue_t *q) | 1696 | void blk_stop_queue(struct request_queue *q) |
1693 | { | 1697 | { |
1694 | blk_remove_plug(q); | 1698 | blk_remove_plug(q); |
1695 | set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); | 1699 | set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); |
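
A sketch of the flow-control pattern blk_stop_queue()/blk_start_queue() support; the hardware-full test and the completion interrupt are hypothetical driver details:

    static void mydev_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = elv_next_request(q)) != NULL) {
                    if (mydev_hw_full()) {          /* hypothetical */
                            blk_stop_queue(q);      /* lock already held here */
                            return;
                    }
                    /* ... start rq on the hardware ... */
            }
    }

    /* later, from the completion interrupt, with the queue lock held: */
    blk_start_queue(q);
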
@@ -1746,7 +1750,7 @@ void blk_run_queue(struct request_queue *q) | |||
1746 | EXPORT_SYMBOL(blk_run_queue); | 1750 | EXPORT_SYMBOL(blk_run_queue); |
1747 | 1751 | ||
1748 | /** | 1752 | /** |
1749 | * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed | 1753 | * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed |
1750 | * @kobj: the kobj belonging to the request queue to be released | 1754 | * @kobj: the kobj belonging to the request queue to be released |
1751 | * | 1755 | * |
1752 | * Description: | 1756 | * Description: |
@@ -1762,7 +1766,8 @@ EXPORT_SYMBOL(blk_run_queue); | |||
1762 | **/ | 1766 | **/ |
1763 | static void blk_release_queue(struct kobject *kobj) | 1767 | static void blk_release_queue(struct kobject *kobj) |
1764 | { | 1768 | { |
1765 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); | 1769 | struct request_queue *q = |
1770 | container_of(kobj, struct request_queue, kobj); | ||
1766 | struct request_list *rl = &q->rq; | 1771 | struct request_list *rl = &q->rq; |
1767 | 1772 | ||
1768 | blk_sync_queue(q); | 1773 | blk_sync_queue(q); |
@@ -1778,13 +1783,13 @@ static void blk_release_queue(struct kobject *kobj) | |||
1778 | kmem_cache_free(requestq_cachep, q); | 1783 | kmem_cache_free(requestq_cachep, q); |
1779 | } | 1784 | } |
1780 | 1785 | ||
1781 | void blk_put_queue(request_queue_t *q) | 1786 | void blk_put_queue(struct request_queue *q) |
1782 | { | 1787 | { |
1783 | kobject_put(&q->kobj); | 1788 | kobject_put(&q->kobj); |
1784 | } | 1789 | } |
1785 | EXPORT_SYMBOL(blk_put_queue); | 1790 | EXPORT_SYMBOL(blk_put_queue); |
1786 | 1791 | ||
1787 | void blk_cleanup_queue(request_queue_t * q) | 1792 | void blk_cleanup_queue(struct request_queue * q) |
1788 | { | 1793 | { |
1789 | mutex_lock(&q->sysfs_lock); | 1794 | mutex_lock(&q->sysfs_lock); |
1790 | set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); | 1795 | set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); |
@@ -1798,7 +1803,7 @@ void blk_cleanup_queue(request_queue_t * q) | |||
1798 | 1803 | ||
1799 | EXPORT_SYMBOL(blk_cleanup_queue); | 1804 | EXPORT_SYMBOL(blk_cleanup_queue); |
1800 | 1805 | ||
1801 | static int blk_init_free_list(request_queue_t *q) | 1806 | static int blk_init_free_list(struct request_queue *q) |
1802 | { | 1807 | { |
1803 | struct request_list *rl = &q->rq; | 1808 | struct request_list *rl = &q->rq; |
1804 | 1809 | ||
@@ -1817,7 +1822,7 @@ static int blk_init_free_list(request_queue_t *q) | |||
1817 | return 0; | 1822 | return 0; |
1818 | } | 1823 | } |
1819 | 1824 | ||
1820 | request_queue_t *blk_alloc_queue(gfp_t gfp_mask) | 1825 | struct request_queue *blk_alloc_queue(gfp_t gfp_mask) |
1821 | { | 1826 | { |
1822 | return blk_alloc_queue_node(gfp_mask, -1); | 1827 | return blk_alloc_queue_node(gfp_mask, -1); |
1823 | } | 1828 | } |
@@ -1825,9 +1830,9 @@ EXPORT_SYMBOL(blk_alloc_queue); | |||
1825 | 1830 | ||
1826 | static struct kobj_type queue_ktype; | 1831 | static struct kobj_type queue_ktype; |
1827 | 1832 | ||
1828 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | 1833 | struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) |
1829 | { | 1834 | { |
1830 | request_queue_t *q; | 1835 | struct request_queue *q; |
1831 | 1836 | ||
1832 | q = kmem_cache_alloc_node(requestq_cachep, | 1837 | q = kmem_cache_alloc_node(requestq_cachep, |
1833 | gfp_mask | __GFP_ZERO, node_id); | 1838 | gfp_mask | __GFP_ZERO, node_id); |
@@ -1882,16 +1887,16 @@ EXPORT_SYMBOL(blk_alloc_queue_node); | |||
1882 | * when the block device is deactivated (such as at module unload). | 1887 | * when the block device is deactivated (such as at module unload). |
1883 | **/ | 1888 | **/ |
1884 | 1889 | ||
1885 | request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) | 1890 | struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) |
1886 | { | 1891 | { |
1887 | return blk_init_queue_node(rfn, lock, -1); | 1892 | return blk_init_queue_node(rfn, lock, -1); |
1888 | } | 1893 | } |
1889 | EXPORT_SYMBOL(blk_init_queue); | 1894 | EXPORT_SYMBOL(blk_init_queue); |
1890 | 1895 | ||
1891 | request_queue_t * | 1896 | struct request_queue * |
1892 | blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | 1897 | blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) |
1893 | { | 1898 | { |
1894 | request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id); | 1899 | struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id); |
1895 | 1900 | ||
1896 | if (!q) | 1901 | if (!q) |
1897 | return NULL; | 1902 | return NULL; |
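
A sketch of the canonical blk_init_queue() pattern with the new signature; do_mydev_request and mydev_lock are hypothetical driver names, and the transfer itself is elided:

    static DEFINE_SPINLOCK(mydev_lock);

    static void do_mydev_request(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = elv_next_request(q)) != NULL) {
                    /* transfer rq->nr_sectors sectors at rq->sector */
                    end_request(rq, 1);     /* 1 == success */
            }
    }

    q = blk_init_queue(do_mydev_request, &mydev_lock);
    if (!q)
            return -ENOMEM;
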
@@ -1940,7 +1945,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
1940 | } | 1945 | } |
1941 | EXPORT_SYMBOL(blk_init_queue_node); | 1946 | EXPORT_SYMBOL(blk_init_queue_node); |
1942 | 1947 | ||
1943 | int blk_get_queue(request_queue_t *q) | 1948 | int blk_get_queue(struct request_queue *q) |
1944 | { | 1949 | { |
1945 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | 1950 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { |
1946 | kobject_get(&q->kobj); | 1951 | kobject_get(&q->kobj); |
@@ -1952,7 +1957,7 @@ int blk_get_queue(request_queue_t *q) | |||
1952 | 1957 | ||
1953 | EXPORT_SYMBOL(blk_get_queue); | 1958 | EXPORT_SYMBOL(blk_get_queue); |
1954 | 1959 | ||
1955 | static inline void blk_free_request(request_queue_t *q, struct request *rq) | 1960 | static inline void blk_free_request(struct request_queue *q, struct request *rq) |
1956 | { | 1961 | { |
1957 | if (rq->cmd_flags & REQ_ELVPRIV) | 1962 | if (rq->cmd_flags & REQ_ELVPRIV) |
1958 | elv_put_request(q, rq); | 1963 | elv_put_request(q, rq); |
@@ -1960,7 +1965,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq) | |||
1960 | } | 1965 | } |
1961 | 1966 | ||
1962 | static struct request * | 1967 | static struct request * |
1963 | blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) | 1968 | blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask) |
1964 | { | 1969 | { |
1965 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); | 1970 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); |
1966 | 1971 | ||
@@ -1988,7 +1993,7 @@ blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) | |||
1988 | * ioc_batching returns true if the ioc is a valid batching request and | 1993 | * ioc_batching returns true if the ioc is a valid batching request and |
1989 | * should be given priority access to a request. | 1994 | * should be given priority access to a request. |
1990 | */ | 1995 | */ |
1991 | static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) | 1996 | static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) |
1992 | { | 1997 | { |
1993 | if (!ioc) | 1998 | if (!ioc) |
1994 | return 0; | 1999 | return 0; |
@@ -2009,7 +2014,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) | |||
2009 | * is the behaviour we want though - once it gets a wakeup it should be given | 2014 | * is the behaviour we want though - once it gets a wakeup it should be given |
2010 | * a nice run. | 2015 | * a nice run. |
2011 | */ | 2016 | */ |
2012 | static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) | 2017 | static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) |
2013 | { | 2018 | { |
2014 | if (!ioc || ioc_batching(q, ioc)) | 2019 | if (!ioc || ioc_batching(q, ioc)) |
2015 | return; | 2020 | return; |
@@ -2018,7 +2023,7 @@ static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) | |||
2018 | ioc->last_waited = jiffies; | 2023 | ioc->last_waited = jiffies; |
2019 | } | 2024 | } |
2020 | 2025 | ||
2021 | static void __freed_request(request_queue_t *q, int rw) | 2026 | static void __freed_request(struct request_queue *q, int rw) |
2022 | { | 2027 | { |
2023 | struct request_list *rl = &q->rq; | 2028 | struct request_list *rl = &q->rq; |
2024 | 2029 | ||
@@ -2037,7 +2042,7 @@ static void __freed_request(request_queue_t *q, int rw) | |||
2037 | * A request has just been released. Account for it, update the full and | 2042 | * A request has just been released. Account for it, update the full and |
2038 | * congestion status, wake up any waiters. Called under q->queue_lock. | 2043 | * congestion status, wake up any waiters. Called under q->queue_lock. |
2039 | */ | 2044 | */ |
2040 | static void freed_request(request_queue_t *q, int rw, int priv) | 2045 | static void freed_request(struct request_queue *q, int rw, int priv) |
2041 | { | 2046 | { |
2042 | struct request_list *rl = &q->rq; | 2047 | struct request_list *rl = &q->rq; |
2043 | 2048 | ||
@@ -2057,7 +2062,7 @@ static void freed_request(request_queue_t *q, int rw, int priv) | |||
2057 | * Returns NULL on failure, with queue_lock held. | 2062 | * Returns NULL on failure, with queue_lock held. |
2058 | * Returns !NULL on success, with queue_lock *not held*. | 2063 | * Returns !NULL on success, with queue_lock *not held*. |
2059 | */ | 2064 | */ |
2060 | static struct request *get_request(request_queue_t *q, int rw_flags, | 2065 | static struct request *get_request(struct request_queue *q, int rw_flags, |
2061 | struct bio *bio, gfp_t gfp_mask) | 2066 | struct bio *bio, gfp_t gfp_mask) |
2062 | { | 2067 | { |
2063 | struct request *rq = NULL; | 2068 | struct request *rq = NULL; |
@@ -2162,7 +2167,7 @@ out: | |||
2162 | * | 2167 | * |
2163 | * Called with q->queue_lock held, and returns with it unlocked. | 2168 | * Called with q->queue_lock held, and returns with it unlocked. |
2164 | */ | 2169 | */ |
2165 | static struct request *get_request_wait(request_queue_t *q, int rw_flags, | 2170 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, |
2166 | struct bio *bio) | 2171 | struct bio *bio) |
2167 | { | 2172 | { |
2168 | const int rw = rw_flags & 0x01; | 2173 | const int rw = rw_flags & 0x01; |
@@ -2204,7 +2209,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags, | |||
2204 | return rq; | 2209 | return rq; |
2205 | } | 2210 | } |
2206 | 2211 | ||
2207 | struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) | 2212 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) |
2208 | { | 2213 | { |
2209 | struct request *rq; | 2214 | struct request *rq; |
2210 | 2215 | ||
@@ -2234,7 +2239,7 @@ EXPORT_SYMBOL(blk_get_request); | |||
2234 | * | 2239 | * |
2235 | * The queue lock must be held with interrupts disabled. | 2240 | * The queue lock must be held with interrupts disabled. |
2236 | */ | 2241 | */ |
2237 | void blk_start_queueing(request_queue_t *q) | 2242 | void blk_start_queueing(struct request_queue *q) |
2238 | { | 2243 | { |
2239 | if (!blk_queue_plugged(q)) | 2244 | if (!blk_queue_plugged(q)) |
2240 | q->request_fn(q); | 2245 | q->request_fn(q); |
@@ -2253,7 +2258,7 @@ EXPORT_SYMBOL(blk_start_queueing); | |||
2253 | * more, when that condition happens we need to put the request back | 2258 | * more, when that condition happens we need to put the request back |
2254 | * on the queue. Must be called with queue lock held. | 2259 | * on the queue. Must be called with queue lock held. |
2255 | */ | 2260 | */ |
2256 | void blk_requeue_request(request_queue_t *q, struct request *rq) | 2261 | void blk_requeue_request(struct request_queue *q, struct request *rq) |
2257 | { | 2262 | { |
2258 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); | 2263 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); |
2259 | 2264 | ||
@@ -2284,7 +2289,7 @@ EXPORT_SYMBOL(blk_requeue_request); | |||
2284 | * of the queue for things like a QUEUE_FULL message from a device, or a | 2289 | * of the queue for things like a QUEUE_FULL message from a device, or a |
2285 | * host that is unable to accept a particular command. | 2290 | * host that is unable to accept a particular command. |
2286 | */ | 2291 | */ |
2287 | void blk_insert_request(request_queue_t *q, struct request *rq, | 2292 | void blk_insert_request(struct request_queue *q, struct request *rq, |
2288 | int at_head, void *data) | 2293 | int at_head, void *data) |
2289 | { | 2294 | { |
2290 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; | 2295 | int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; |
@@ -2330,7 +2335,7 @@ static int __blk_rq_unmap_user(struct bio *bio) | |||
2330 | return ret; | 2335 | return ret; |
2331 | } | 2336 | } |
2332 | 2337 | ||
2333 | static int __blk_rq_map_user(request_queue_t *q, struct request *rq, | 2338 | static int __blk_rq_map_user(struct request_queue *q, struct request *rq, |
2334 | void __user *ubuf, unsigned int len) | 2339 | void __user *ubuf, unsigned int len) |
2335 | { | 2340 | { |
2336 | unsigned long uaddr; | 2341 | unsigned long uaddr; |
@@ -2403,8 +2408,8 @@ unmap_bio: | |||
2403 | * original bio must be passed back in to blk_rq_unmap_user() for proper | 2408 | * original bio must be passed back in to blk_rq_unmap_user() for proper |
2404 | * unmapping. | 2409 | * unmapping. |
2405 | */ | 2410 | */ |
2406 | int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, | 2411 | int blk_rq_map_user(struct request_queue *q, struct request *rq, |
2407 | unsigned long len) | 2412 | void __user *ubuf, unsigned long len) |
2408 | { | 2413 | { |
2409 | unsigned long bytes_read = 0; | 2414 | unsigned long bytes_read = 0; |
2410 | struct bio *bio = NULL; | 2415 | struct bio *bio = NULL; |
@@ -2470,7 +2475,7 @@ EXPORT_SYMBOL(blk_rq_map_user); | |||
2470 | * original bio must be passed back in to blk_rq_unmap_user() for proper | 2475 | * original bio must be passed back in to blk_rq_unmap_user() for proper |
2471 | * unmapping. | 2476 | * unmapping. |
2472 | */ | 2477 | */ |
2473 | int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, | 2478 | int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, |
2474 | struct sg_iovec *iov, int iov_count, unsigned int len) | 2479 | struct sg_iovec *iov, int iov_count, unsigned int len) |
2475 | { | 2480 | { |
2476 | struct bio *bio; | 2481 | struct bio *bio; |
@@ -2540,7 +2545,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user); | |||
2540 | * @len: length of user data | 2545 | * @len: length of user data |
2541 | * @gfp_mask: memory allocation flags | 2546 | * @gfp_mask: memory allocation flags |
2542 | */ | 2547 | */ |
2543 | int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, | 2548 | int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, |
2544 | unsigned int len, gfp_t gfp_mask) | 2549 | unsigned int len, gfp_t gfp_mask) |
2545 | { | 2550 | { |
2546 | struct bio *bio; | 2551 | struct bio *bio; |
@@ -2577,7 +2582,7 @@ EXPORT_SYMBOL(blk_rq_map_kern); | |||
2577 | * Insert a fully prepared request at the back of the io scheduler queue | 2582 | * Insert a fully prepared request at the back of the io scheduler queue |
2578 | * for execution. Don't wait for completion. | 2583 | * for execution. Don't wait for completion. |
2579 | */ | 2584 | */ |
2580 | void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, | 2585 | void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, |
2581 | struct request *rq, int at_head, | 2586 | struct request *rq, int at_head, |
2582 | rq_end_io_fn *done) | 2587 | rq_end_io_fn *done) |
2583 | { | 2588 | { |
@@ -2605,7 +2610,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); | |||
2605 | * Insert a fully prepared request at the back of the io scheduler queue | 2610 | * Insert a fully prepared request at the back of the io scheduler queue |
2606 | * for execution and wait for completion. | 2611 | * for execution and wait for completion. |
2607 | */ | 2612 | */ |
2608 | int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, | 2613 | int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, |
2609 | struct request *rq, int at_head) | 2614 | struct request *rq, int at_head) |
2610 | { | 2615 | { |
2611 | DECLARE_COMPLETION_ONSTACK(wait); | 2616 | DECLARE_COMPLETION_ONSTACK(wait); |
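
A sketch of issuing a prepared, non-fs request synchronously through these helpers; the command setup is elided, disk is the driver's gendisk, and the REQ_TYPE_SPECIAL marking is an assumption about this kernel generation's request typing:

    struct request *rq;
    int err;

    rq = blk_get_request(q, READ, GFP_KERNEL);
    rq->cmd_type = REQ_TYPE_SPECIAL;        /* not a file system request */
    /* ... fill in rq->cmd[], or map a buffer with blk_rq_map_kern() ... */

    err = blk_execute_rq(q, disk, rq, 0);   /* 0: insert at tail */
    blk_put_request(rq);
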
@@ -2648,7 +2653,7 @@ EXPORT_SYMBOL(blk_execute_rq); | |||
2648 | */ | 2653 | */ |
2649 | int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) | 2654 | int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) |
2650 | { | 2655 | { |
2651 | request_queue_t *q; | 2656 | struct request_queue *q; |
2652 | 2657 | ||
2653 | if (bdev->bd_disk == NULL) | 2658 | if (bdev->bd_disk == NULL) |
2654 | return -ENXIO; | 2659 | return -ENXIO; |
@@ -2684,7 +2689,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) | |||
2684 | * queue lock is held and interrupts disabled, as we muck with the | 2689 | * queue lock is held and interrupts disabled, as we muck with the |
2685 | * request queue list. | 2690 | * request queue list. |
2686 | */ | 2691 | */ |
2687 | static inline void add_request(request_queue_t * q, struct request * req) | 2692 | static inline void add_request(struct request_queue * q, struct request * req) |
2688 | { | 2693 | { |
2689 | drive_stat_acct(req, req->nr_sectors, 1); | 2694 | drive_stat_acct(req, req->nr_sectors, 1); |
2690 | 2695 | ||
@@ -2730,7 +2735,7 @@ EXPORT_SYMBOL_GPL(disk_round_stats); | |||
2730 | /* | 2735 | /* |
2731 | * queue lock must be held | 2736 | * queue lock must be held |
2732 | */ | 2737 | */ |
2733 | void __blk_put_request(request_queue_t *q, struct request *req) | 2738 | void __blk_put_request(struct request_queue *q, struct request *req) |
2734 | { | 2739 | { |
2735 | if (unlikely(!q)) | 2740 | if (unlikely(!q)) |
2736 | return; | 2741 | return; |
@@ -2760,7 +2765,7 @@ EXPORT_SYMBOL_GPL(__blk_put_request); | |||
2760 | void blk_put_request(struct request *req) | 2765 | void blk_put_request(struct request *req) |
2761 | { | 2766 | { |
2762 | unsigned long flags; | 2767 | unsigned long flags; |
2763 | request_queue_t *q = req->q; | 2768 | struct request_queue *q = req->q; |
2764 | 2769 | ||
2765 | /* | 2770 | /* |
2766 | * Gee, IDE calls in w/ NULL q. Fix IDE and remove the | 2771 | * Gee, IDE calls in w/ NULL q. Fix IDE and remove the |
@@ -2798,7 +2803,7 @@ EXPORT_SYMBOL(blk_end_sync_rq); | |||
2798 | /* | 2803 | /* |
2799 | * Has to be called with the request spinlock acquired | 2804 | * Has to be called with the request spinlock acquired |
2800 | */ | 2805 | */ |
2801 | static int attempt_merge(request_queue_t *q, struct request *req, | 2806 | static int attempt_merge(struct request_queue *q, struct request *req, |
2802 | struct request *next) | 2807 | struct request *next) |
2803 | { | 2808 | { |
2804 | if (!rq_mergeable(req) || !rq_mergeable(next)) | 2809 | if (!rq_mergeable(req) || !rq_mergeable(next)) |
@@ -2851,7 +2856,8 @@ static int attempt_merge(request_queue_t *q, struct request *req, | |||
2851 | return 1; | 2856 | return 1; |
2852 | } | 2857 | } |
2853 | 2858 | ||
2854 | static inline int attempt_back_merge(request_queue_t *q, struct request *rq) | 2859 | static inline int attempt_back_merge(struct request_queue *q, |
2860 | struct request *rq) | ||
2855 | { | 2861 | { |
2856 | struct request *next = elv_latter_request(q, rq); | 2862 | struct request *next = elv_latter_request(q, rq); |
2857 | 2863 | ||
@@ -2861,7 +2867,8 @@ static inline int attempt_back_merge(request_queue_t *q, struct request *rq) | |||
2861 | return 0; | 2867 | return 0; |
2862 | } | 2868 | } |
2863 | 2869 | ||
2864 | static inline int attempt_front_merge(request_queue_t *q, struct request *rq) | 2870 | static inline int attempt_front_merge(struct request_queue *q, |
2871 | struct request *rq) | ||
2865 | { | 2872 | { |
2866 | struct request *prev = elv_former_request(q, rq); | 2873 | struct request *prev = elv_former_request(q, rq); |
2867 | 2874 | ||
@@ -2905,7 +2912,7 @@ static void init_request_from_bio(struct request *req, struct bio *bio) | |||
2905 | req->start_time = jiffies; | 2912 | req->start_time = jiffies; |
2906 | } | 2913 | } |
2907 | 2914 | ||
2908 | static int __make_request(request_queue_t *q, struct bio *bio) | 2915 | static int __make_request(struct request_queue *q, struct bio *bio) |
2909 | { | 2916 | { |
2910 | struct request *req; | 2917 | struct request *req; |
2911 | int el_ret, nr_sectors, barrier, err; | 2918 | int el_ret, nr_sectors, barrier, err; |
@@ -3119,7 +3126,7 @@ static inline int should_fail_request(struct bio *bio) | |||
3119 | */ | 3126 | */ |
3120 | static inline void __generic_make_request(struct bio *bio) | 3127 | static inline void __generic_make_request(struct bio *bio) |
3121 | { | 3128 | { |
3122 | request_queue_t *q; | 3129 | struct request_queue *q; |
3123 | sector_t maxsector; | 3130 | sector_t maxsector; |
3124 | sector_t old_sector; | 3131 | sector_t old_sector; |
3125 | int ret, nr_sectors = bio_sectors(bio); | 3132 | int ret, nr_sectors = bio_sectors(bio); |
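
For contrast with __make_request() above, a sketch of a bio-based driver that supplies its own make_request_fn and never sees struct request at all; the names are hypothetical, and bio_endio() takes a byte count in this kernel generation, as the flush_dry_bio_endio() signature earlier shows:

    static int mydev_make_request(struct request_queue *q, struct bio *bio)
    {
            /* service bio_sectors(bio) sectors at bio->bi_sector ... */
            bio_endio(bio, bio->bi_size, 0);        /* 0 == no error */
            return 0;
    }

    q = blk_alloc_queue(GFP_KERNEL);
    blk_queue_make_request(q, mydev_make_request);
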
@@ -3312,7 +3319,7 @@ static void blk_recalc_rq_segments(struct request *rq) | |||
3312 | struct bio *bio, *prevbio = NULL; | 3319 | struct bio *bio, *prevbio = NULL; |
3313 | int nr_phys_segs, nr_hw_segs; | 3320 | int nr_phys_segs, nr_hw_segs; |
3314 | unsigned int phys_size, hw_size; | 3321 | unsigned int phys_size, hw_size; |
3315 | request_queue_t *q = rq->q; | 3322 | struct request_queue *q = rq->q; |
3316 | 3323 | ||
3317 | if (!rq->bio) | 3324 | if (!rq->bio) |
3318 | return; | 3325 | return; |
@@ -3658,7 +3665,8 @@ void end_request(struct request *req, int uptodate) | |||
3658 | 3665 | ||
3659 | EXPORT_SYMBOL(end_request); | 3666 | EXPORT_SYMBOL(end_request); |
3660 | 3667 | ||
3661 | void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) | 3668 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, |
3669 | struct bio *bio) | ||
3662 | { | 3670 | { |
3663 | /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ | 3671 | /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ |
3664 | rq->cmd_flags |= (bio->bi_rw & 3); | 3672 | rq->cmd_flags |= (bio->bi_rw & 3); |
@@ -3701,7 +3709,7 @@ int __init blk_dev_init(void) | |||
3701 | sizeof(struct request), 0, SLAB_PANIC, NULL); | 3709 | sizeof(struct request), 0, SLAB_PANIC, NULL); |
3702 | 3710 | ||
3703 | requestq_cachep = kmem_cache_create("blkdev_queue", | 3711 | requestq_cachep = kmem_cache_create("blkdev_queue", |
3704 | sizeof(request_queue_t), 0, SLAB_PANIC, NULL); | 3712 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); |
3705 | 3713 | ||
3706 | iocontext_cachep = kmem_cache_create("blkdev_ioc", | 3714 | iocontext_cachep = kmem_cache_create("blkdev_ioc", |
3707 | sizeof(struct io_context), 0, SLAB_PANIC, NULL); | 3715 | sizeof(struct io_context), 0, SLAB_PANIC, NULL); |
@@ -4021,7 +4029,8 @@ static ssize_t | |||
4021 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | 4029 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
4022 | { | 4030 | { |
4023 | struct queue_sysfs_entry *entry = to_queue(attr); | 4031 | struct queue_sysfs_entry *entry = to_queue(attr); |
4024 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); | 4032 | struct request_queue *q = |
4033 | container_of(kobj, struct request_queue, kobj); | ||
4025 | ssize_t res; | 4034 | ssize_t res; |
4026 | 4035 | ||
4027 | if (!entry->show) | 4036 | if (!entry->show) |
@@ -4041,7 +4050,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, | |||
4041 | const char *page, size_t length) | 4050 | const char *page, size_t length) |
4042 | { | 4051 | { |
4043 | struct queue_sysfs_entry *entry = to_queue(attr); | 4052 | struct queue_sysfs_entry *entry = to_queue(attr); |
4044 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); | 4053 | struct request_queue *q = container_of(kobj, struct request_queue, kobj); |
4045 | 4054 | ||
4046 | ssize_t res; | 4055 | ssize_t res; |
4047 | 4056 | ||
@@ -4072,7 +4081,7 @@ int blk_register_queue(struct gendisk *disk) | |||
4072 | { | 4081 | { |
4073 | int ret; | 4082 | int ret; |
4074 | 4083 | ||
4075 | request_queue_t *q = disk->queue; | 4084 | struct request_queue *q = disk->queue; |
4076 | 4085 | ||
4077 | if (!q || !q->request_fn) | 4086 | if (!q || !q->request_fn) |
4078 | return -ENXIO; | 4087 | return -ENXIO; |
@@ -4097,7 +4106,7 @@ int blk_register_queue(struct gendisk *disk) | |||
4097 | 4106 | ||
4098 | void blk_unregister_queue(struct gendisk *disk) | 4107 | void blk_unregister_queue(struct gendisk *disk) |
4099 | { | 4108 | { |
4100 | request_queue_t *q = disk->queue; | 4109 | struct request_queue *q = disk->queue; |
4101 | 4110 | ||
4102 | if (q && q->request_fn) { | 4111 | if (q && q->request_fn) { |
4103 | elv_unregister_queue(q); | 4112 | elv_unregister_queue(q); |
diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 1c3de2b9a6b5..7563d8aa3944 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c | |||
@@ -11,13 +11,13 @@ struct noop_data { | |||
11 | struct list_head queue; | 11 | struct list_head queue; |
12 | }; | 12 | }; |
13 | 13 | ||
14 | static void noop_merged_requests(request_queue_t *q, struct request *rq, | 14 | static void noop_merged_requests(struct request_queue *q, struct request *rq, |
15 | struct request *next) | 15 | struct request *next) |
16 | { | 16 | { |
17 | list_del_init(&next->queuelist); | 17 | list_del_init(&next->queuelist); |
18 | } | 18 | } |
19 | 19 | ||
20 | static int noop_dispatch(request_queue_t *q, int force) | 20 | static int noop_dispatch(struct request_queue *q, int force) |
21 | { | 21 | { |
22 | struct noop_data *nd = q->elevator->elevator_data; | 22 | struct noop_data *nd = q->elevator->elevator_data; |
23 | 23 | ||
@@ -31,14 +31,14 @@ static int noop_dispatch(request_queue_t *q, int force) | |||
31 | return 0; | 31 | return 0; |
32 | } | 32 | } |
33 | 33 | ||
34 | static void noop_add_request(request_queue_t *q, struct request *rq) | 34 | static void noop_add_request(struct request_queue *q, struct request *rq) |
35 | { | 35 | { |
36 | struct noop_data *nd = q->elevator->elevator_data; | 36 | struct noop_data *nd = q->elevator->elevator_data; |
37 | 37 | ||
38 | list_add_tail(&rq->queuelist, &nd->queue); | 38 | list_add_tail(&rq->queuelist, &nd->queue); |
39 | } | 39 | } |
40 | 40 | ||
41 | static int noop_queue_empty(request_queue_t *q) | 41 | static int noop_queue_empty(struct request_queue *q) |
42 | { | 42 | { |
43 | struct noop_data *nd = q->elevator->elevator_data; | 43 | struct noop_data *nd = q->elevator->elevator_data; |
44 | 44 | ||
@@ -46,7 +46,7 @@ static int noop_queue_empty(request_queue_t *q) | |||
46 | } | 46 | } |
47 | 47 | ||
48 | static struct request * | 48 | static struct request * |
49 | noop_former_request(request_queue_t *q, struct request *rq) | 49 | noop_former_request(struct request_queue *q, struct request *rq) |
50 | { | 50 | { |
51 | struct noop_data *nd = q->elevator->elevator_data; | 51 | struct noop_data *nd = q->elevator->elevator_data; |
52 | 52 | ||
@@ -56,7 +56,7 @@ noop_former_request(request_queue_t *q, struct request *rq) | |||
56 | } | 56 | } |
57 | 57 | ||
58 | static struct request * | 58 | static struct request * |
59 | noop_latter_request(request_queue_t *q, struct request *rq) | 59 | noop_latter_request(struct request_queue *q, struct request *rq) |
60 | { | 60 | { |
61 | struct noop_data *nd = q->elevator->elevator_data; | 61 | struct noop_data *nd = q->elevator->elevator_data; |
62 | 62 | ||
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq) | |||
65 | return list_entry(rq->queuelist.next, struct request, queuelist); | 65 | return list_entry(rq->queuelist.next, struct request, queuelist); |
66 | } | 66 | } |
67 | 67 | ||
68 | static void *noop_init_queue(request_queue_t *q) | 68 | static void *noop_init_queue(struct request_queue *q) |
69 | { | 69 | { |
70 | struct noop_data *nd; | 70 | struct noop_data *nd; |
71 | 71 | ||
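
For completeness, a sketch of how the noop methods above are wired into an elevator_type and registered; the ops field names follow the elevator interface of this kernel generation and should be checked against include/linux/elevator.h:

    static struct elevator_type elevator_noop = {
            .ops = {
                    .elevator_merge_req_fn   = noop_merged_requests,
                    .elevator_dispatch_fn    = noop_dispatch,
                    .elevator_add_req_fn     = noop_add_request,
                    .elevator_queue_empty_fn = noop_queue_empty,
                    .elevator_former_req_fn  = noop_former_request,
                    .elevator_latter_req_fn  = noop_latter_request,
                    .elevator_init_fn        = noop_init_queue,
            },
            .elevator_name = "noop",
            .elevator_owner = THIS_MODULE,
    };

    static int __init noop_init(void)
    {
            elv_register(&elevator_noop);
            return 0;
    }
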
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index d359a715bbc8..91c73224f4c6 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -49,22 +49,22 @@ static int sg_get_version(int __user *p) | |||
49 | return put_user(sg_version_num, p); | 49 | return put_user(sg_version_num, p); |
50 | } | 50 | } |
51 | 51 | ||
52 | static int scsi_get_idlun(request_queue_t *q, int __user *p) | 52 | static int scsi_get_idlun(struct request_queue *q, int __user *p) |
53 | { | 53 | { |
54 | return put_user(0, p); | 54 | return put_user(0, p); |
55 | } | 55 | } |
56 | 56 | ||
57 | static int scsi_get_bus(request_queue_t *q, int __user *p) | 57 | static int scsi_get_bus(struct request_queue *q, int __user *p) |
58 | { | 58 | { |
59 | return put_user(0, p); | 59 | return put_user(0, p); |
60 | } | 60 | } |
61 | 61 | ||
62 | static int sg_get_timeout(request_queue_t *q) | 62 | static int sg_get_timeout(struct request_queue *q) |
63 | { | 63 | { |
64 | return q->sg_timeout / (HZ / USER_HZ); | 64 | return q->sg_timeout / (HZ / USER_HZ); |
65 | } | 65 | } |
66 | 66 | ||
67 | static int sg_set_timeout(request_queue_t *q, int __user *p) | 67 | static int sg_set_timeout(struct request_queue *q, int __user *p) |
68 | { | 68 | { |
69 | int timeout, err = get_user(timeout, p); | 69 | int timeout, err = get_user(timeout, p); |
70 | 70 | ||
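
A worked example of the jiffies-to-user-ticks scaling in sg_get_timeout(): with HZ = 1000 and USER_HZ = 100, a q->sg_timeout of 60000 jiffies (60 seconds) is reported to user space as 60000 / (1000 / 100) = 6000 user ticks; sg_set_timeout() applies the inverse scaling on the way in.
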
@@ -74,14 +74,14 @@ static int sg_set_timeout(request_queue_t *q, int __user *p) | |||
74 | return err; | 74 | return err; |
75 | } | 75 | } |
76 | 76 | ||
77 | static int sg_get_reserved_size(request_queue_t *q, int __user *p) | 77 | static int sg_get_reserved_size(struct request_queue *q, int __user *p) |
78 | { | 78 | { |
79 | unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); | 79 | unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); |
80 | 80 | ||
81 | return put_user(val, p); | 81 | return put_user(val, p); |
82 | } | 82 | } |
83 | 83 | ||
84 | static int sg_set_reserved_size(request_queue_t *q, int __user *p) | 84 | static int sg_set_reserved_size(struct request_queue *q, int __user *p) |
85 | { | 85 | { |
86 | int size, err = get_user(size, p); | 86 | int size, err = get_user(size, p); |
87 | 87 | ||
@@ -101,7 +101,7 @@ static int sg_set_reserved_size(request_queue_t *q, int __user *p) | |||
101 | * will always return that we are ATAPI even for a real SCSI drive, I'm not | 101 | * will always return that we are ATAPI even for a real SCSI drive, I'm not |
102 | * so sure this is worth doing anything about (why would you care??) | 102 | * so sure this is worth doing anything about (why would you care??) |
103 | */ | 103 | */ |
104 | static int sg_emulated_host(request_queue_t *q, int __user *p) | 104 | static int sg_emulated_host(struct request_queue *q, int __user *p) |
105 | { | 105 | { |
106 | return put_user(1, p); | 106 | return put_user(1, p); |
107 | } | 107 | } |
@@ -214,7 +214,7 @@ int blk_verify_command(unsigned char *cmd, int has_write_perm) | |||
214 | } | 214 | } |
215 | EXPORT_SYMBOL_GPL(blk_verify_command); | 215 | EXPORT_SYMBOL_GPL(blk_verify_command); |
216 | 216 | ||
217 | static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq, | 217 | static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, |
218 | struct sg_io_hdr *hdr, int has_write_perm) | 218 | struct sg_io_hdr *hdr, int has_write_perm) |
219 | { | 219 | { |
220 | memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ | 220 | memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ |
@@ -286,7 +286,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, | |||
286 | return r; | 286 | return r; |
287 | } | 287 | } |
288 | 288 | ||
289 | static int sg_io(struct file *file, request_queue_t *q, | 289 | static int sg_io(struct file *file, struct request_queue *q, |
290 | struct gendisk *bd_disk, struct sg_io_hdr *hdr) | 290 | struct gendisk *bd_disk, struct sg_io_hdr *hdr) |
291 | { | 291 | { |
292 | unsigned long start_time; | 292 | unsigned long start_time; |
@@ -519,7 +519,8 @@ error: | |||
519 | EXPORT_SYMBOL_GPL(sg_scsi_ioctl); | 519 | EXPORT_SYMBOL_GPL(sg_scsi_ioctl); |
520 | 520 | ||
521 | /* Send basic block requests */ | 521 | /* Send basic block requests */ |
522 | static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data) | 522 | static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, |
523 | int cmd, int data) | ||
523 | { | 524 | { |
524 | struct request *rq; | 525 | struct request *rq; |
525 | int err; | 526 | int err; |
@@ -539,7 +540,8 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c | |||
539 | return err; | 540 | return err; |
540 | } | 541 | } |
541 | 542 | ||
542 | static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data) | 543 | static inline int blk_send_start_stop(struct request_queue *q, |
544 | struct gendisk *bd_disk, int data) | ||
543 | { | 545 | { |
544 | return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); | 546 | return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); |
545 | } | 547 | } |