Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 50
 1 file changed, 33 insertions(+), 17 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index efdc9b5bc05c..025a7f084dbd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -96,8 +96,8 @@ struct io_context {
 
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
-struct io_context *current_io_context(int gfp_flags);
-struct io_context *get_io_context(int gfp_flags);
+struct io_context *current_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
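Usage note (not part of the patch): with the typed signature, callers pass the standard gfp_t constants rather than a bare int, and sparse can flag mismatches. A minimal caller sketch:

    /* get_io_context() takes a reference, so it pairs with
     * put_io_context(); GFP_NOIO keeps the allocation safe on the
     * I/O submission path. */
    struct io_context *ioc = get_io_context(GFP_NOIO);
    if (ioc)
            put_io_context(ioc);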
@@ -107,9 +107,9 @@ typedef void (rq_end_io_fn)(struct request *);
 struct request_list {
 	int count[2];
 	int starved[2];
+	int elvpriv;
 	mempool_t *rq_pool;
 	wait_queue_head_t wait[2];
-	wait_queue_head_t drain;
 };
 
 #define BLK_MAX_CDB	16
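Usage note: the new elvpriv counter tracks requests that hold elevator-private data, which is what lets the old drain waitqueue go away; an elevator switch can simply wait for the count to reach zero. A hedged sketch of the accounting (the helper and its call sites are assumptions; the real bookkeeping lives in the request allocation path):

    /* Hypothetical sketch: bump elvpriv when a request is allocated
     * with elevator data attached, drop it again on free. */
    static void rl_track_elvpriv(struct request_list *rl, int priv, int alloc)
    {
            if (priv)
                    rl->elvpriv += alloc ? 1 : -1;
    }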
@@ -203,6 +203,7 @@ struct request {
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
 	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
 	__REQ_CMD,		/* is a regular fs rw request */
@@ -210,6 +211,7 @@ enum rq_flag_bits {
 	__REQ_STARTED,		/* drive already may have started this one */
 	__REQ_DONTPREP,		/* don't call prep for this one */
 	__REQ_QUEUED,		/* uses queueing */
+	__REQ_ELVPRIV,		/* elevator private data attached */
 	/*
 	 * for ATA/ATAPI devices
 	 */
@@ -235,6 +237,7 @@ enum rq_flag_bits {
 
 #define REQ_RW		(1 << __REQ_RW)
 #define REQ_FAILFAST	(1 << __REQ_FAILFAST)
+#define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
 #define REQ_CMD		(1 << __REQ_CMD)
@@ -242,6 +245,7 @@ enum rq_flag_bits {
 #define REQ_STARTED	(1 << __REQ_STARTED)
 #define REQ_DONTPREP	(1 << __REQ_DONTPREP)
 #define REQ_QUEUED	(1 << __REQ_QUEUED)
+#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
 #define REQ_PC		(1 << __REQ_PC)
 #define REQ_BLOCK_PC	(1 << __REQ_BLOCK_PC)
 #define REQ_SENSE	(1 << __REQ_SENSE)
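Usage note: the two new bits travel with a request through the scheduler. A hedged teardown sketch (the function itself is hypothetical; elv_completed_request() and elv_put_request() are elevator hooks of this period, but the exact call sites shown are assumptions):

    static void rq_teardown_sketch(request_queue_t *q, struct request *rq)
    {
            /* REQ_SORTED: the elevator saw this request and keeps state. */
            if (rq->flags & REQ_SORTED)
                    elv_completed_request(q, rq);
            /* REQ_ELVPRIV: elevator-private data is released with rq. */
            if (rq->flags & REQ_ELVPRIV)
                    elv_put_request(q, rq);
    }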
@@ -333,6 +337,12 @@ struct request_queue
 	end_flush_fn		*end_flush_fn;
 
 	/*
+	 * Dispatch queue sorting
+	 */
+	sector_t		end_sector;
+	struct request		*boundary_rq;
+
+	/*
 	 * Auto-unplugging state
 	 */
 	struct timer_list	unplug_timer;
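Usage note: end_sector and boundary_rq record how far the dispatch list has been sorted, so new insertions can stay in ascending sector order without crossing a boundary request. A simplified sketch of a boundary-respecting sorted insert (an illustration, not this patch's own code):

    /* Walk the dispatch list backwards; stop at the boundary or at the
     * first request whose sector is not past the new one. */
    static void dispatch_sort_sketch(request_queue_t *q, struct request *rq)
    {
            struct list_head *entry;

            list_for_each_prev(entry, &q->queue_head) {
                    struct request *pos = list_entry_rq(entry);

                    if (pos == q->boundary_rq)
                            break;          /* stay FIFO across the boundary */
                    if (rq->sector >= pos->sector)
                            break;          /* sorted position found */
            }
            list_add(&rq->queuelist, entry);
    }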
@@ -354,7 +364,7 @@ struct request_queue
 	 * queue needs bounce pages for pages above this limit
 	 */
 	unsigned long		bounce_pfn;
-	unsigned int		bounce_gfp;
+	gfp_t			bounce_gfp;
 
 	/*
 	 * various queue flags, see QUEUE_* below
@@ -405,8 +415,6 @@ struct request_queue
 	unsigned int		sg_reserved_size;
 	int			node;
 
-	struct list_head	drain_list;
-
 	/*
 	 * reserved for flush operations
 	 */
@@ -434,7 +442,7 @@ enum {
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
-#define QUEUE_FLAG_DRAIN	8	/* draining queue for sched switch */
+#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_FLUSH	9	/* doing barrier flush sequence */
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
@@ -454,6 +462,7 @@ enum {
 #define blk_pm_request(rq)	\
 	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
 
+#define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
 #define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
 #define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)
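Usage note: blk_sorted_rq() gives completion paths a cheap test for whether the elevator ever saw a request. A hedged fragment (the call site is an assumption):

    /* Only requests the elevator accounted for need to notify it. */
    if (blk_sorted_rq(rq))
            elv_completed_request(rq->q, rq);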
@@ -550,7 +559,7 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void blk_end_sync_rq(struct request *rq);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern struct request *blk_get_request(request_queue_t *, int, int);
+extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
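Usage note: the allocation flag is now typed; a typical caller of the period reads as below (the error handling is shown for illustration):

    /* May sleep with GFP_KERNEL; returns NULL when no request is
     * available or the queue is dead. */
    struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);

    if (!rq)
            return -ENOMEM;
    /* ... fill in and issue the request, then ... */
    blk_put_request(rq);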
@@ -565,7 +574,7 @@ extern void blk_run_queue(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
 extern int blk_rq_unmap_user(struct bio *, unsigned int);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int);
+extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
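Usage note: the last blk_rq_map_kern() argument now carries the allocation context for the bios built during mapping. A hedged fragment (buffer, len, and the cleanup label are hypothetical):

    int err = blk_rq_map_kern(q, rq, buffer, len, GFP_KERNEL);
    if (err)
            goto out_put;   /* hypothetical cleanup label */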
@@ -611,12 +620,21 @@ extern void end_request(struct request *req, int uptodate);
 
 static inline void blkdev_dequeue_request(struct request *req)
 {
-	BUG_ON(list_empty(&req->queuelist));
+	elv_dequeue_request(req->q, req);
+}
 
-	list_del_init(&req->queuelist);
+/*
+ * This should be in elevator.h, but that requires pulling in rq and q
+ */
+static inline void elv_dispatch_add_tail(struct request_queue *q,
+					 struct request *rq)
+{
+	if (q->last_merge == rq)
+		q->last_merge = NULL;
 
-	if (req->rl)
-		elv_remove_request(req->q, req);
+	q->end_sector = rq_end_sector(rq);
+	q->boundary_rq = rq;
+	list_add_tail(&rq->queuelist, &q->queue_head);
 }
 
 /*
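Usage note: with requests now queued privately inside the scheduler, an elevator's dispatch hook hands them to the queue proper through this helper. A sketch against the dispatch interface of this series (pick_next_request() is hypothetical):

    static int sketch_dispatch(request_queue_t *q, int force)
    {
            struct request *rq = pick_next_request(q);      /* hypothetical */

            if (!rq)
                    return 0;
            /* Fixes up last_merge, end_sector and boundary_rq, then
             * tail-adds rq to q->queue_head. */
            elv_dispatch_add_tail(q, rq);
            return 1;
    }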
@@ -650,12 +668,10 @@ extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(request_queue_t *);
 extern void __generic_unplug_device(request_queue_t *);
 extern long nr_blockdev_pages(void);
-extern void blk_wait_queue_drained(request_queue_t *, int);
-extern void blk_finish_queue_drain(request_queue_t *);
 
 int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(int gfp_mask);
-request_queue_t *blk_alloc_queue_node(int,int);
+request_queue_t *blk_alloc_queue(gfp_t);
+request_queue_t *blk_alloc_queue_node(gfp_t, int);
 #define blk_put_queue(q)	blk_cleanup_queue((q))
 
 /*
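Usage note: the queue allocators take a typed mask as well; a minimal caller sketch:

    request_queue_t *q = blk_alloc_queue(GFP_KERNEL);

    if (!q)
            return NULL;    /* allocation failed */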
