```
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 17:29:53 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 17:29:53 -0400
commit    bfffa1cc9db8a950dd4b1a09999f8a20e69a6652
tree      01b046072ca9105b7852f790762f7b00b72e6ff7 /include/linux/blkdev.h
parent    cc8a0a943948d1b1bc803b37486831af7b04dd38
parent    ae994ea972473c0ace9d55f718b60f0727af1381
```
Merge branch 'for-4.2/core' of git://git.kernel.dk/linux-block
Pull core block IO update from Jens Axboe:
"Nothing really major in here, mostly a collection of smaller
optimizations and cleanups, mixed with various fixes. In more detail,
this contains:
- Addition of policy specific data to blkcg for block cgroups. From
Arianna Avanzini.
- Various cleanups around command types from Christoph.
- Cleanup of the suspend block I/O path from Christoph.
- Plugging updates from Shaohua and Jeff Moyer, for blk-mq.
- Eliminating atomic inc/dec of both remaining IO count and reference
count in a bio. From me.
- Fixes for SG gap and chunk size support for data-less (discards)
IO, so we can merge these better. From me.
- Small restructuring of blk-mq shared tag support, freeing drivers
from iterating hardware queues. From Keith Busch.
- A few cfq-iosched tweaks, from Tahsin Erdogan and me. Makes the
IOPS mode the default for non-rotational storage"
* 'for-4.2/core' of git://git.kernel.dk/linux-block: (35 commits)
cfq-iosched: fix other locations where blkcg_to_cfqgd() can return NULL
cfq-iosched: fix sysfs oops when attempting to read unconfigured weights
cfq-iosched: move group scheduling functions under ifdef
cfq-iosched: fix the setting of IOPS mode on SSDs
blktrace: Add blktrace.c to BLOCK LAYER in MAINTAINERS file
block, cgroup: implement policy-specific per-blkcg data
block: Make CFQ default to IOPS mode on SSDs
block: add blk_set_queue_dying() to blkdev.h
blk-mq: Shared tag enhancements
block: don't honor chunk sizes for data-less IO
block: only honor SG gap prevention for merges that contain data
block: fix returnvar.cocci warnings
block, dm: don't copy bios for request clones
block: remove management of bi_remaining when restoring original bi_end_io
block: replace trylock with mutex_lock in blkdev_reread_part()
block: export blkdev_reread_part() and __blkdev_reread_part()
suspend: simplify block I/O handling
block: collapse bio bit space
block: remove unused BIO_RW_BLOCK and BIO_EOF flags
block: remove BIO_EOPNOTSUPP
...
Diffstat (limited to 'include/linux/blkdev.h')

```
 include/linux/blkdev.h | 45 +++++++--------------------------------------
 1 file changed, 7 insertions(+), 38 deletions(-)
```
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5d93a6645e88..a6ae5f9bee49 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -30,7 +30,6 @@ struct scsi_ioctl_command;
 
 struct request_queue;
 struct elevator_queue;
-struct request_pm_state;
 struct blk_trace;
 struct request;
 struct sg_io_hdr;
```
```diff
@@ -75,18 +74,7 @@ struct request_list {
 enum rq_cmd_type_bits {
 	REQ_TYPE_FS		= 1,	/* fs request */
 	REQ_TYPE_BLOCK_PC,		/* scsi command */
-	REQ_TYPE_SENSE,			/* sense request */
-	REQ_TYPE_PM_SUSPEND,		/* suspend request */
-	REQ_TYPE_PM_RESUME,		/* resume request */
-	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
-	REQ_TYPE_SPECIAL,		/* driver defined type */
-	/*
-	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
-	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
-	 * private REQ_LB opcodes to differentiate what type of request this is
-	 */
-	REQ_TYPE_ATA_TASKFILE,
-	REQ_TYPE_ATA_PC,
+	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
 };
 
 #define BLK_MAX_CDB	16
```
```diff
@@ -108,7 +96,7 @@ struct request {
 	struct blk_mq_ctx *mq_ctx;
 
 	u64 cmd_flags;
-	enum rq_cmd_type_bits cmd_type;
+	unsigned cmd_type;
 	unsigned long atomic_flags;
 
 	int cpu;
```
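Taken together, the two hunks above collapse the old zoo of sense/PM/ATA command types into a single REQ_TYPE_DRV_PRIV marker and widen rq->cmd_type from the enum to a plain `unsigned`, so drivers can number their own types upward from REQ_TYPE_DRV_PRIV. A minimal sketch of the pattern this enables (the driver and type names are hypothetical):

```c
/* Hypothetical driver-private request types, numbered upward from the
 * block layer's REQ_TYPE_DRV_PRIV.  rq->cmd_type is now a bare
 * "unsigned", so values beyond enum rq_cmd_type_bits are representable. */
enum mydrv_req_type {
	MYDRV_REQ_INTERNAL = REQ_TYPE_DRV_PRIV,
	MYDRV_REQ_ABORT,
};

static inline bool mydrv_rq_is_private(const struct request *rq)
{
	/* Everything at or above REQ_TYPE_DRV_PRIV belongs to the driver. */
	return rq->cmd_type >= REQ_TYPE_DRV_PRIV;
}
```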
```diff
@@ -216,19 +204,6 @@ static inline unsigned short req_get_ioprio(struct request *req)
 	return req->ioprio;
 }
 
-/*
- * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
- * requests. Some step values could eventually be made generic.
- */
-struct request_pm_state
-{
-	/* PM state machine step value, currently driver specific */
-	int	pm_step;
-	/* requested PM state value (S1, S2, S3, S4, ...) */
-	u32	pm_state;
-	void*	data;		/* for driver use */
-};
-
 #include <linux/elevator.h>
 
 struct blk_queue_ctx;
```
```diff
@@ -469,7 +444,7 @@ struct request_queue {
 	struct mutex		sysfs_lock;
 
 	int			bypass_depth;
-	int			mq_freeze_depth;
+	atomic_t		mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
```
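Switching mq_freeze_depth to an atomic_t supports the blk-mq freeze/unfreeze path, where nested freezes can be started from more than one context; only the depth transitions to and from zero do real work. A rough sketch of the intended counting pattern (the quiesce/resume helpers are hypothetical, not the exact kernel code):

```c
static void example_freeze_start(struct request_queue *q)
{
	/* Only the first freezer actually quiesces the queue; nested
	 * callers merely record another level of depth. */
	if (atomic_inc_return(&q->mq_freeze_depth) == 1)
		quiesce_queue(q);	/* hypothetical helper */
}

static void example_unfreeze(struct request_queue *q)
{
	int depth = atomic_dec_return(&q->mq_freeze_depth);

	WARN_ON_ONCE(depth < 0);
	if (depth == 0)
		resume_queue(q);	/* hypothetical helper */
}
```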
```diff
@@ -610,10 +585,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	(((rq)->cmd_flags & REQ_STARTED) && \
 	 ((rq)->cmd_type == REQ_TYPE_FS))
 
-#define blk_pm_request(rq) \
-	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
-	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
-
 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 /* rq->queuelist of dequeued request must be list_empty() */
```
```diff
@@ -804,11 +775,7 @@ extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
-			     struct bio_set *bs, gfp_t gfp_mask,
-			     int (*bio_ctr)(struct bio *, struct bio *, void *),
-			     void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
+extern void blk_rq_prep_clone(struct request *rq, struct request *rq_src);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
```
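This hunk pairs with "block, dm: don't copy bios for request clones" from the shortlog: a clone now shares the source request's bios rather than duplicating them, so the bio_set, gfp mask, and per-bio constructor drop out of the signature, the call can no longer fail, and blk_rq_unprep_clone() leaves the public header. A hedged sketch of a request-based stacking driver using the new form (names are hypothetical):

```c
/* Sketch: preparing a clone under the simplified API is one call with
 * no allocations and no error path. */
static void mydrv_setup_clone(struct request *clone, struct request *rq)
{
	blk_rq_prep_clone(clone, rq);		/* clone shares rq's bios */
	clone->end_io = mydrv_end_clone;	/* hypothetical completion hook */
	clone->end_io_data = rq;
}
```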
```diff
@@ -845,6 +812,7 @@ extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
+extern void __blk_run_queue_uncond(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
```
```diff
@@ -933,7 +901,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		return q->limits.max_hw_sectors;
 
-	if (!q->limits.chunk_sectors)
+	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
 		return blk_queue_get_max_sectors(q, rq->cmd_flags);
 
 	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
```
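The chunk_sectors clamp keeps reads and writes from straddling a device's chunk boundary; a discard carries no data payload, so per "block: don't honor chunk sizes for data-less IO" it now falls through to the ordinary queue limits and can merge across chunks. To make the clamp concrete, here is an illustrative userspace re-creation of the boundary arithmetic that blk_max_size_offset() performs (assuming, as the block layer does, a power-of-two chunk_sectors):

```c
#include <stdio.h>

/* Distance from pos to the next chunk boundary, mirroring the
 * blk_max_size_offset() arithmetic that REQ_DISCARD now bypasses. */
static unsigned int sectors_to_chunk_boundary(unsigned int chunk_sectors,
					      unsigned long long pos)
{
	return chunk_sectors - (unsigned int)(pos & (chunk_sectors - 1ULL));
}

int main(void)
{
	/* A write at sector 300 on a 256-sector chunked queue is capped
	 * at 256 - (300 & 255) = 212 sectors; a discard at the same
	 * position is not. */
	printf("%u\n", sectors_to_chunk_boundary(256, 300));
	return 0;
}
```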
```diff
@@ -1054,6 +1022,7 @@ bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
+extern void blk_set_queue_dying(struct request_queue *);
 
 /*
  * block layer runtime pm functions
```
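blk_set_queue_dying() already existed inside the block core; declaring it in blkdev.h (per "block: add blk_set_queue_dying() to blkdev.h" in the shortlog) lets drivers mark a queue dying themselves so newly submitted IO fails fast before teardown. A hypothetical removal-path sketch, not taken from any real driver:

```c
static void mydrv_remove(struct mydrv_device *dev)	/* hypothetical type */
{
	/* Flag the queue first so new requests are refused immediately,
	 * then tear it down. */
	blk_set_queue_dying(dev->queue);
	/* ... cancel or fail any requests the driver still owns ... */
	blk_cleanup_queue(dev->queue);
}
```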
