author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-11 11:41:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-11 11:41:17 -0400
commit		23d4ed53b7342bf5999b3ea227d9f69e75e5a625 (patch)
tree		86229fb558235c2f742b35c0c66d5d98003f5f6e
parent		e413a19a8ef49ae3b76310bb569dabe66b22f5a3 (diff)
parent		a2d445d440003f2d70ee4cd4970ea82ace616fee (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
"Final small batch of fixes to be included before -rc1. Some general
cleanups in here as well, but some of the blk-mq fixes we need for the
NVMe conversion and/or scsi-mq. The pull request contains:
- Support for not merging across a specified "chunk size", if set by
the driver. Some NVMe devices perform poorly for IO that crosses
such a chunk, so we need to support it generically as part of
request merging avoid having to do complicated split logic. From
me.
- Bump max tag depth to 10K tags. Some SCSI devices have a huge
shared tag space. Previously we failed with EINVAL if too large a
tag depth was specified; now we truncate it and pass back the
actual value. From me.
- Various blk-mq rq init fixes from me and others.
- A fix for queue entering on a dying queue for blk-mq, from Keith.
This is needed to prevent oopsing on hot device removal.
- Fixup for blk-mq timer addition from Ming Lei.
- Small round of performance fixes for mtip32xx from Sam Bradshaw.
- Minor stack leak fix from Rickard Strandqvist.
- Two __init annotations from Fabian Frederick"
* 'for-linus' of git://git.kernel.dk/linux-block:
block: add __init to blkcg_policy_register
block: add __init to elv_register
block: ensure that bio_add_page() always accepts a page for an empty bio
blk-mq: add timer in blk_mq_start_request
blk-mq: always initialize request->start_time
block: blk-exec.c: Cleaning up local variable address returnd
mtip32xx: minor performance enhancements
blk-mq: ->timeout should be cleared in blk_mq_rq_ctx_init()
blk-mq: don't allow queue entering for a dying queue
blk-mq: bump max tag depth to 10K tags
block: add blk_rq_set_block_pc()
block: add notion of a chunk size for request merging
28 files changed, 135 insertions(+), 60 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 1ba33657160f..8c2e55e39a1b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -849,7 +849,13 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
+	unsigned int max_sectors;
+
+	max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+	if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size)
+		max_sectors = len >> 9;
+
+	return __bio_add_page(q, bio, page, len, offset, max_sectors);
 }
 EXPORT_SYMBOL(bio_add_page);
 
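The guarantee above matters for callers that build single-page bios: an empty bio must always accept at least one page, even when blk_max_size_offset() reports less than a page of room at that sector. A minimal caller-side sketch of the resulting behavior (hypothetical usage, not part of this patch; bdev, sector and page are assumed caller state):

	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	if (!bio)
		return -ENOMEM;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;	/* may sit just short of a chunk boundary */

	/*
	 * bio is empty (bi_iter.bi_size == 0), so bio_add_page() bumps
	 * max_sectors up to the page itself; the per-offset size limit
	 * alone can no longer reject the first page.
	 */
	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}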
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9f5bce33e6fe..069bc202ffe3 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1093,7 +1093,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  * Register @pol with blkcg core. Might sleep and @pol may be modified on
  * successful registration. Returns 0 on success and -errno on failure.
  */
-int blkcg_policy_register(struct blkcg_policy *pol)
+int __init blkcg_policy_register(struct blkcg_policy *pol)
 {
 	int i, ret;
 
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index d692b29c083a..cbb7f943f78a 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -145,7 +145,7 @@ void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
-int blkcg_policy_register(struct blkcg_policy *pol);
+int __init blkcg_policy_register(struct blkcg_policy *pol);
 void blkcg_policy_unregister(struct blkcg_policy *pol);
 int blkcg_activate_policy(struct request_queue *q,
 			  const struct blkcg_policy *pol);
@@ -580,7 +580,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
 static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
 static inline int blkcg_activate_policy(struct request_queue *q,
 					const struct blkcg_policy *pol) { return 0; }
diff --git a/block/blk-core.c b/block/blk-core.c
index 40d654861c33..9aca8c71e70b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1218,6 +1218,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 	if (unlikely(!rq))
 		return ERR_PTR(-ENOMEM);
 
+	blk_rq_set_block_pc(rq);
+
 	for_each_bio(bio) {
 		struct bio *bounce_bio = bio;
 		int ret;
@@ -1235,6 +1237,22 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 EXPORT_SYMBOL(blk_make_request);
 
 /**
+ * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
+ * @rq:		request to be initialized
+ *
+ */
+void blk_rq_set_block_pc(struct request *rq)
+{
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->__data_len = 0;
+	rq->__sector = (sector_t) -1;
+	rq->bio = rq->biotail = NULL;
+	memset(rq->__cmd, 0, sizeof(rq->__cmd));
+	rq->cmd = rq->__cmd;
+}
+EXPORT_SYMBOL(blk_rq_set_block_pc);
+
+/**
  * blk_requeue_request - put a request back on queue
  * @q:		request queue where request should be inserted
  * @rq:		request to be inserted
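The helper centralizes what the drivers below used to open-code: instead of assigning rq->cmd_type = REQ_TYPE_BLOCK_PC by hand, callers now invoke it right after request allocation, before attaching any command bytes or data. A minimal sketch of the new pattern, mirroring the conversions later in this diff (q and the CDB byte cmd are assumed caller state, error paths abridged):

	struct request *rq;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/* resets the command buffer, data length, sector and bio pointers */
	blk_rq_set_block_pc(rq);

	rq->cmd[0] = cmd;
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;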
diff --git a/block/blk-exec.c b/block/blk-exec.c
index dbf4502b1d67..f4d27b12c90b 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -132,6 +132,11 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 	if (rq->errors)
 		err = -EIO;
 
+	if (rq->sense == sense)	{
+		rq->sense = NULL;
+		rq->sense_len = 0;
+	}
+
 	return err;
 }
 EXPORT_SYMBOL(blk_execute_rq);
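For context on the stack-leak fix: when the caller supplies no sense buffer, blk_execute_rq() points rq->sense at a local array for the duration of the call. The hunk above resets that pointer before returning, so the request never leaves the function holding an address into a dead stack frame. Roughly (an abridged sketch of the surrounding function, not verbatim):

	int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head)
	{
		char sense[SCSI_SENSE_BUFFERSIZE];	/* on-stack scratch */
		int err = 0;

		if (!rq->sense) {
			memset(sense, 0, sizeof(sense));
			rq->sense = sense;	/* stack address borrowed here */
			rq->sense_len = 0;
		}

		/* ... issue the request and wait for it to complete ... */

		if (rq->errors)
			err = -EIO;

		/* the fix: don't return with rq->sense pointing at our frame */
		if (rq->sense == sense) {
			rq->sense = NULL;
			rq->sense_len = 0;
		}

		return err;
	}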
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4e4cd6208052..e11f5f8e0313 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -82,8 +82,10 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
 	__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
 	smp_wmb();
-	/* we have problems to freeze the queue if it's initializing */
-	if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
+
+	/* we have problems freezing the queue if it's initializing */
+	if (!blk_queue_dying(q) &&
+	    (!blk_queue_bypass(q) || !blk_queue_init_done(q)))
 		return 0;
 
 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
@@ -183,6 +185,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->rq_disk = NULL;
 	rq->part = NULL;
+	rq->start_time = jiffies;
 #ifdef CONFIG_BLK_CGROUP
 	rq->rl = NULL;
 	set_start_time_ns(rq);
@@ -202,6 +205,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->sense = NULL;
 
 	INIT_LIST_HEAD(&rq->timeout_list);
+	rq->timeout = 0;
+
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
@@ -406,16 +411,7 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	if (unlikely(blk_bidi_rq(rq)))
 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
 
-	/*
-	 * Just mark start time and set the started bit. Due to memory
-	 * ordering, we know we'll see the correct deadline as long as
-	 * REQ_ATOMIC_STARTED is seen. Use the default queue timeout,
-	 * unless one has been set in the request.
-	 */
-	if (!rq->timeout)
-		rq->deadline = jiffies + q->rq_timeout;
-	else
-		rq->deadline = jiffies + rq->timeout;
+	blk_add_timer(rq);
 
 	/*
 	 * Mark us as started and clear complete. Complete might have been
@@ -967,11 +963,6 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	list_add_tail(&rq->queuelist, &ctx->rq_list);
 
 	blk_mq_hctx_mark_pending(hctx, ctx);
-
-	/*
-	 * We do this early, to ensure we are on the right CPU.
-	 */
-	blk_add_timer(rq);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
@@ -1100,10 +1091,8 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
 	init_request_from_bio(rq, bio);
 
-	if (blk_do_io_stat(rq)) {
-		rq->start_time = jiffies;
+	if (blk_do_io_stat(rq))
 		blk_account_io_start(rq, 1);
-	}
 }
 
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
@@ -1216,7 +1205,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_mq_bio_to_request(rq, bio);
 	blk_mq_start_request(rq, true);
-	blk_add_timer(rq);
 
 	/*
 	 * For OK queue, we are done. For error, kill it. Any other
@@ -1967,13 +1955,19 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
+/*
+ * Alloc a tag set to be associated with one or more request queues.
+ * May fail with EINVAL for various error conditions. May adjust the
+ * requested depth down, if it's too large. In that case, the set
+ * value will be stored in set->queue_depth.
+ */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
 	int i;
 
 	if (!set->nr_hw_queues)
 		return -EINVAL;
-	if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
+	if (!set->queue_depth)
 		return -EINVAL;
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
@@ -1981,6 +1975,11 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
+	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
+		pr_info("blk-mq: reduced tag depth to %u\n",
+			BLK_MQ_MAX_DEPTH);
+		set->queue_depth = BLK_MQ_MAX_DEPTH;
+	}
 
 	set->tags = kmalloc_node(set->nr_hw_queues *
 				 sizeof(struct blk_mq_tags *),
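With the EINVAL check relaxed, a driver that asks for more than BLK_MQ_MAX_DEPTH tags now gets the clamped depth written back into the set rather than an error. A sketch of the driver-visible behavior (my_mq_ops and struct my_cmd are hypothetical):

	struct blk_mq_tag_set set = {
		.ops		= &my_mq_ops,
		.nr_hw_queues	= 1,
		.queue_depth	= 65536,	/* exceeds BLK_MQ_MAX_DEPTH */
		.cmd_size	= sizeof(struct my_cmd),
		.numa_node	= NUMA_NO_NODE,
	};
	int ret;

	ret = blk_mq_alloc_tag_set(&set);
	if (ret)
		return ret;

	/* the depth was truncated and passed back, not rejected */
	pr_info("got tag depth %u\n", set.queue_depth);	/* prints 10240 */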
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5d21239bc859..f1a1795a5683 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -113,6 +113,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
 	lim->discard_granularity = 0;
@@ -277,6 +278,26 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 /**
+ * blk_queue_chunk_sectors - set size of the chunk for this queue
+ * @q:  the request queue for the device
+ * @chunk_sectors:  chunk sectors in the usual 512b unit
+ *
+ * Description:
+ *    If a driver doesn't want IOs to cross a given chunk size, it can set
+ *    this limit and prevent merging across chunks. Note that the chunk size
+ *    must currently be a power-of-2 in sectors. Also note that the block
+ *    layer must accept a page worth of data at any offset. So if the
+ *    crossing of chunks is a hard limitation in the driver, it must still be
+ *    prepared to split single page bios.
+ **/
+void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
+{
+	BUG_ON(!is_power_of_2(chunk_sectors));
+	q->limits.chunk_sectors = chunk_sectors;
+}
+EXPORT_SYMBOL(blk_queue_chunk_sectors);
+
+/**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
  * @max_discard_sectors: maximum number of sectors to discard
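A driver opts in by declaring its boundary once at queue setup; per the pull message, this is aimed at NVMe devices that perform poorly on IO crossing a stripe-like boundary. A minimal sketch (the 128K figure is an assumed example, not from this patch):

	/*
	 * Device degrades on IO crossing 128K boundaries:
	 * 128K / 512b = 256 sectors, a power of two as required.
	 */
	blk_queue_chunk_sectors(q, 256);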
diff --git a/block/bsg.c b/block/bsg.c
index e5214c148096..ff46addde5d8 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -196,7 +196,6 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr->request_len;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
 	rq->timeout = msecs_to_jiffies(hdr->timeout);
 	if (!rq->timeout)
@@ -273,6 +272,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 	rq = blk_get_request(q, rw, GFP_KERNEL);
 	if (!rq)
 		return ERR_PTR(-ENOMEM);
+	blk_rq_set_block_pc(rq);
+
 	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
 	if (ret)
 		goto out;
diff --git a/block/elevator.c b/block/elevator.c
index 1e01b66a0b92..f35edddfe9b5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -845,7 +845,7 @@ void elv_unregister_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(elv_unregister_queue);
 
-int elv_register(struct elevator_type *e)
+int __init elv_register(struct elevator_type *e)
 {
 	char *def = "";
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 9c28a5b38042..14695c6221c8 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -229,7 +229,6 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr->cmd_len;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
 	rq->timeout = msecs_to_jiffies(hdr->timeout);
 	if (!rq->timeout)
@@ -311,6 +310,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
 	if (!rq)
 		return -ENOMEM;
+	blk_rq_set_block_pc(rq);
 
 	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
 		blk_put_request(rq);
@@ -491,7 +491,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	blk_rq_set_block_pc(rq);
 
 	blk_execute_rq(q, disk, rq, 0);
 
@@ -524,7 +524,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	int err;
 
 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	blk_rq_set_block_pc(rq);
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	rq->cmd[0] = cmd;
 	rq->cmd[4] = data;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 74abd49fabdc..295f3afbbef5 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -39,6 +39,7 @@
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
+#include <linux/prefetch.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)
@@ -2380,6 +2381,8 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	/* Map the scatter list for DMA access */
 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
+	prefetch(&port->flags);
+
 	command->scatter_ents = nents;
 
 	/*
@@ -2392,7 +2395,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	fis = command->command;
 	fis->type = 0x27;
 	fis->opts = 1 << 7;
-	if (rq_data_dir(rq) == READ)
+	if (dma_dir == DMA_FROM_DEVICE)
 		fis->command = ATA_CMD_FPDMA_READ;
 	else
 		fis->command = ATA_CMD_FPDMA_WRITE;
@@ -2412,7 +2415,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	fis->res3 = 0;
 	fill_command_sg(dd, command, nents);
 
-	if (command->unaligned)
+	if (unlikely(command->unaligned))
 		fis->device |= 1 << 7;
 
 	/* Populate the command header */
@@ -2433,7 +2436,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	 * To prevent this command from being issued
 	 * if an internal command is in progress or error handling is active.
 	 */
-	if (port->flags & MTIP_PF_PAUSE_IO) {
+	if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
 		set_bit(rq->tag, port->cmds_to_issue);
 		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
 		return;
@@ -3754,7 +3757,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 	struct driver_data *dd = hctx->queue->queuedata;
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	if (!dd->unal_qdepth || rq_data_dir(rq) == READ)
+	if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
 		return false;
 
 	/*
@@ -3776,11 +3779,11 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	int ret;
 
-	if (mtip_check_unal_depth(hctx, rq))
+	if (unlikely(mtip_check_unal_depth(hctx, rq)))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
 	ret = mtip_submit_request(hctx, rq);
-	if (!ret)
+	if (likely(!ret))
 		return BLK_MQ_RQ_QUEUE_OK;
 
 	rq->errors = ret;
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4b9b554234bc..ba1b31ee22ec 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -493,19 +493,19 @@ struct driver_data {
 
 	struct workqueue_struct *isr_workq;
 
-	struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
-
 	atomic_t irq_workers_active;
 
+	struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
+
 	int isr_binding;
 
 	struct block_device *bdev;
 
-	int unal_qdepth; /* qdepth of unaligned IO queue */
-
 	struct list_head online_list; /* linkage for online list */
 
 	struct list_head remove_list; /* linkage for removing list */
+
+	int unal_qdepth; /* qdepth of unaligned IO queue */
 };
 
 #endif
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ef166ad2dbad..758ac442c5b5 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,6 +704,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 
 	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
 			     WRITE : READ, __GFP_WAIT);
+	blk_rq_set_block_pc(rq);
 
 	if (cgc->buflen) {
 		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -716,7 +717,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
 
 	rq->timeout = 60*HZ;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	if (cgc->quiet)
 		rq->cmd_flags |= REQ_QUIET;
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 2a44767891f5..898b84bba28a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2184,6 +2184,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 			ret = -ENOMEM;
 			break;
 		}
+		blk_rq_set_block_pc(rq);
 
 		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 		if (ret) {
@@ -2203,7 +2204,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		rq->cmd[9] = 0xf8;
 
 		rq->cmd_len = 12;
-		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 		rq->timeout = 60 * HZ;
 		bio = rq->bio;
 
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 5248c888552b..7bcf67eec921 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -120,6 +120,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
 			    "%s: blk_get_request failed\n", __func__);
 		return NULL;
 	}
+	blk_rq_set_block_pc(rq);
 
 	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
 		blk_put_request(rq);
@@ -128,7 +129,6 @@ static struct request *get_alua_req(struct scsi_device *sdev,
 		return NULL;
 	}
 
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
 			 REQ_FAILFAST_DRIVER;
 	rq->retries = ALUA_FAILOVER_RETRIES;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e1c8be06de9d..6f07f7fe3aa1 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -280,6 +280,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
 		return NULL;
 	}
 
+	blk_rq_set_block_pc(rq);
 	rq->cmd_len = COMMAND_SIZE(cmd);
 	rq->cmd[0] = cmd;
 
@@ -304,7 +305,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
 		break;
 	}
 
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
 			 REQ_FAILFAST_DRIVER;
 	rq->timeout = CLARIION_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 084062bb8ee9..e9d9fea9e272 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -120,7 +120,7 @@ retry:
 	if (!req)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
 
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	blk_rq_set_block_pc(req);
 	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
 			  REQ_FAILFAST_DRIVER;
 	req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
@@ -250,7 +250,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
 	if (!req)
 		return SCSI_DH_RES_TEMP_UNAVAIL;
 
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	blk_rq_set_block_pc(req);
 	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
 			  REQ_FAILFAST_DRIVER;
 	req->cmd_len = COMMAND_SIZE(START_STOP);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 4b9cf93f3fb6..826069db9848 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -279,6 +279,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
 			"get_rdac_req: blk_get_request failed.\n");
 		return NULL;
 	}
+	blk_rq_set_block_pc(rq);
 
 	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
 		blk_put_request(rq);
@@ -287,7 +288,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
 		return NULL;
 	}
 
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
 			 REQ_FAILFAST_DRIVER;
 	rq->retries = RDAC_RETRIES;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index bac04c2335aa..5f4cbf0c4759 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1570,6 +1570,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
 		if (unlikely(!req))
 			return ERR_PTR(-ENOMEM);
 
+		blk_rq_set_block_pc(req);
 		return req;
 	}
 }
@@ -1590,7 +1591,6 @@ static int _init_blk_request(struct osd_request *or,
 	}
 
 	or->request = req;
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->cmd_flags |= REQ_QUIET;
 
 	req->timeout = or->timeout;
@@ -1608,7 +1608,7 @@ static int _init_blk_request(struct osd_request *or,
 			ret = PTR_ERR(req);
 			goto out;
 		}
-		req->cmd_type = REQ_TYPE_BLOCK_PC;
+		blk_rq_set_block_pc(req);
 		or->in.req = or->request->next_rq = req;
 	}
 	} else if (has_in)
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 21883a2d6324..0727ea7cc387 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -365,7 +365,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
 	if (!req)
 		return DRIVER_ERROR << 24;
 
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	blk_rq_set_block_pc(req);
 	req->cmd_flags |= REQ_QUIET;
 
 	SRpnt->bio = NULL;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 47a1ffc4c904..cbe38e5e7955 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1952,6 +1952,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	 */
 	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
 
+	blk_rq_set_block_pc(req);
+
 	req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
 	req->cmd[1] = 0;
 	req->cmd[2] = 0;
@@ -1961,7 +1963,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 
 	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
 
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->cmd_flags |= REQ_QUIET;
 	req->timeout = 10 * HZ;
 	req->retries = 5;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index be0d5fad999d..f7e316368c99 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -195,6 +195,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
 	if (!req)
 		return ret;
+	blk_rq_set_block_pc(req);
 
 	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
 					buffer, bufflen, __GFP_WAIT))
@@ -206,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	req->sense_len = 0;
 	req->retries = retries;
 	req->timeout = timeout;
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
 
 	/*
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961484e1..53268aaba559 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1653,10 +1653,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
 	if (!rq)
 		return -ENOMEM;
 
+	blk_rq_set_block_pc(rq);
 	memcpy(rq->cmd, cmd, hp->cmd_len);
-
 	rq->cmd_len = hp->cmd_len;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
 	srp->rq = rq;
 	rq->end_io_data = srp;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index afc834e172c6..14eb4b256a03 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -484,7 +484,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 	if (!req)
 		return DRIVER_ERROR << 24;
 
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	blk_rq_set_block_pc(req);
 	req->cmd_flags |= REQ_QUIET;
 
 	mdata->null_mapped = 1;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0f199f6a0738..94d00df28f39 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1055,6 +1055,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			goto fail;
 		}
+
+		blk_rq_set_block_pc(req);
 	} else {
 		BUG_ON(!cmd->data_length);
 
@@ -1071,7 +1073,6 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 		}
 	}
 
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->end_io = pscsi_req_done;
 	req->end_io_data = cmd;
 	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0feedebfde48..a002cf191427 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -135,7 +135,7 @@ enum {
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
 
-	BLK_MQ_MAX_DEPTH	= 2048,
+	BLK_MQ_MAX_DEPTH	= 10240,
 
 	BLK_MQ_CPU_WORK_BATCH	= 8,
 };
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3cd426e971db..31e11051f1ba 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -280,6 +280,7 @@ struct queue_limits {
 	unsigned long		seg_boundary_mask;
 
 	unsigned int		max_hw_sectors;
+	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
 	unsigned int		physical_block_size;
@@ -795,6 +796,7 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern struct request *blk_make_request(struct request_queue *, struct bio *,
 					gfp_t);
+extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
@@ -910,6 +912,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	return q->limits.max_sectors;
 }
 
+/*
+ * Return maximum size of a request at given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+					       sector_t offset)
+{
+	if (!q->limits.chunk_sectors)
+		return q->limits.max_hw_sectors;
+
+	return q->limits.chunk_sectors -
+			(offset & (q->limits.chunk_sectors - 1));
+}
+
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -917,7 +933,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		return q->limits.max_hw_sectors;
 
-	return blk_queue_get_max_sectors(q, rq->cmd_flags);
+	if (!q->limits.chunk_sectors)
+		return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+			blk_queue_get_max_sectors(q, rq->cmd_flags));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -983,6 +1003,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
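The power-of-2 requirement on chunk_sectors is what lets blk_max_size_offset() compute the room left in the current chunk with a mask instead of a division. A worked example with assumed values:

	/* chunk_sectors = 256, so chunk_sectors - 1 = 0xff */
	sector_t offset = 1000;				/* 1000 & 0xff = 232 into this chunk */
	unsigned int left = 256 - (offset & (256 - 1));	/* 24 sectors to the boundary */

	/*
	 * blk_rq_get_max_sectors() then takes min(24, the usual queue
	 * limit), so a request at sector 1000 is never merged past
	 * sector 1024, the next chunk boundary.
	 */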
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index df63bd3a8cf1..4ff262e2bf37 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -144,7 +144,7 @@ extern void elv_drain_elevator(struct request_queue *);
  * io scheduler registration
  */
 extern void __init load_default_elevator_module(void);
-extern int elv_register(struct elevator_type *);
+extern int __init elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*