 block/blk-core.c           |  2 +-
 block/blk-mq.c             |  8 ++++++--
 block/blk-softirq.c        | 17 ++++++-----------
 block/blk.h                |  2 +-
 block/elevator.c           |  2 ++
 drivers/block/loop.c       |  8 ++++----
 drivers/scsi/scsi_lib.c    |  4 ++--
 fs/bio-integrity.c         | 22 ++++++++++++----------
 include/linux/bio.h        |  6 +++---
 include/linux/blk_types.h  |  2 ++
 include/linux/blkdev.h     | 13 ++++++++++++-
 include/scsi/scsi_device.h |  4 ++--
 12 files changed, 53 insertions(+), 37 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 34d7c196338b..a0e3096c4bb5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1307,7 +1307,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                 struct request_list *rl = blk_rq_rl(req);
 
                 BUG_ON(!list_empty(&req->queuelist));
-                BUG_ON(!hlist_unhashed(&req->hash));
+                BUG_ON(ELV_ON_HASH(req));
 
                 blk_free_request(rl, req);
                 freed_request(rl, flags);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b1bcc619d0ea..1d2a9bdbee57 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
                                unsigned int cpu)
 {
         struct blk_mq_hw_ctx *hctx = data;
+        struct request_queue *q = hctx->queue;
         struct blk_mq_ctx *ctx;
         LIST_HEAD(tmp);
 
@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
         /*
          * Move ctx entries to new CPU, if this one is going away.
          */
-        ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+        ctx = __blk_mq_get_ctx(q, cpu);
 
         spin_lock(&ctx->lock);
         if (!list_empty(&ctx->rq_list)) {
@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
         if (list_empty(&tmp))
                 return;
 
-        ctx = blk_mq_get_ctx(hctx->queue);
+        ctx = blk_mq_get_ctx(q);
         spin_lock(&ctx->lock);
 
         while (!list_empty(&tmp)) {
@@ -988,10 +989,13 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
                 list_move_tail(&rq->queuelist, &ctx->rq_list);
         }
 
+        hctx = q->mq_ops->map_queue(q, ctx->cpu);
         blk_mq_hctx_mark_pending(hctx, ctx);
 
         spin_unlock(&ctx->lock);
         blk_mq_put_ctx(ctx);
+
+        blk_mq_run_hw_queue(hctx, true);
 }
 
 static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
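
Note on the hunk above: after requests are migrated off a dying CPU onto a new
software context, the hardware context is looked up again for the destination
context's CPU and the queue is kicked, so the moved requests cannot be
stranded on a queue that never runs. A minimal standalone sketch of that
"remap, then kick" pattern follows; the toy_* types and toy_map_queue() are
invented stand-ins for illustration, not the real blk-mq structures (the real
lookup is q->mq_ops->map_queue()).

/* Sketch only: toy stand-ins for blk-mq's ctx/hctx mapping, not kernel code. */
#include <stdio.h>

struct toy_ctx  { int cpu; int pending; };
struct toy_hctx { int id; };

/* Hypothetical CPU -> hardware-queue mapping. */
static struct toy_hctx *toy_map_queue(struct toy_hctx *hctxs, int cpu, int nr_hw)
{
        return &hctxs[cpu % nr_hw];
}

static void toy_run_hw_queue(struct toy_hctx *hctx)
{
        printf("running hardware queue %d\n", hctx->id);
}

int main(void)
{
        struct toy_hctx hctxs[2] = { { .id = 0 }, { .id = 1 } };
        struct toy_ctx dest = { .cpu = 3, .pending = 1 };

        /* After moving requests to the destination ctx, look up the hctx
         * that serves that ctx's CPU and run it, mirroring the fix above. */
        struct toy_hctx *hctx = toy_map_queue(hctxs, dest.cpu, 2);
        if (dest.pending)
                toy_run_hw_queue(hctx);
        return 0;
}
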
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ebd6b6f1bdeb..53b1737e978d 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
         while (!list_empty(&local_list)) {
                 struct request *rq;
 
-                rq = list_entry(local_list.next, struct request, queuelist);
-                list_del_init(&rq->queuelist);
+                rq = list_entry(local_list.next, struct request, ipi_list);
+                list_del_init(&rq->ipi_list);
                 rq->q->softirq_done_fn(rq);
         }
 }
@@ -45,14 +45,9 @@ static void trigger_softirq(void *data)
 
         local_irq_save(flags);
         list = this_cpu_ptr(&blk_cpu_done);
-        /*
-         * We reuse queuelist for a list of requests to process. Since the
-         * queuelist is used by the block layer only for requests waiting to be
-         * submitted to the device it is unused now.
-         */
-        list_add_tail(&rq->queuelist, list);
+        list_add_tail(&rq->ipi_list, list);
 
-        if (list->next == &rq->queuelist)
+        if (list->next == &rq->ipi_list)
                 raise_softirq_irqoff(BLOCK_SOFTIRQ);
 
         local_irq_restore(flags);
@@ -141,7 +136,7 @@ void __blk_complete_request(struct request *req)
                 struct list_head *list;
 do_local:
                 list = this_cpu_ptr(&blk_cpu_done);
-                list_add_tail(&req->queuelist, list);
+                list_add_tail(&req->ipi_list, list);
 
                 /*
                  * if the list only contains our just added request,
@@ -149,7 +144,7 @@ do_local:
                  * entries there, someone already raised the irq but it
                  * hasn't run yet.
                  */
-                if (list->next == &req->queuelist)
+                if (list->next == &req->ipi_list)
                         raise_softirq_irqoff(BLOCK_SOFTIRQ);
         } else if (raise_blk_irq(ccpu, req))
                 goto do_local;
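
Note on the hunks above: completion now rides the request's own ipi_list
member instead of borrowing queuelist, but the logic is unchanged: a request
is appended to the per-CPU done list, and BLOCK_SOFTIRQ is raised only when
that request is the first entry, since a previously non-empty list means the
softirq is already pending. A small userspace sketch of that "raise only on
the insert that makes the list non-empty" idea, using a toy array rather than
the kernel's per-CPU list and softirq machinery:

/* Sketch only: illustrates "raise the softirq only when the list was empty". */
#include <stdio.h>

#define MAX_PENDING 16

static int pending[MAX_PENDING];
static int npending;

static void raise_block_softirq(void)  /* stand-in for raise_softirq_irqoff() */
{
        printf("BLOCK_SOFTIRQ raised\n");
}

static void complete_request(int rq)
{
        pending[npending++] = rq;
        /* Only the insert that makes the list non-empty needs to raise the
         * softirq; later inserts know it is already pending. */
        if (npending == 1)
                raise_block_softirq();
}

int main(void)
{
        complete_request(1);    /* raises */
        complete_request(2);    /* does not raise again */
        return 0;
}
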
diff --git a/block/blk.h b/block/blk.h
index d23b415b8a28..1d880f1f957f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -78,7 +78,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
 /*
  * Internal elevator interface
  */
-#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
 void blk_abort_flushes(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index 42c45a7d6714..1e01b66a0b92 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
 static inline void __elv_rqhash_del(struct request *rq)
 {
         hash_del(&rq->hash);
+        rq->cmd_flags &= ~REQ_HASHED;
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 
         BUG_ON(ELV_ON_HASH(rq));
         hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+        rq->cmd_flags |= REQ_HASHED;
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
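
Note: these two one-liners, together with the ELV_ON_HASH() change in
block/blk.h and the BUG_ON() change in block/blk-core.c, track "request is on
the merge hash" in a cmd_flags bit instead of probing the hlist node itself.
That is what later allows the node's storage in struct request to be shared
with ipi_list. A minimal sketch of the pattern, with invented toy_* names
rather than the real request fields:

/* Sketch only: track "on the merge hash" with a flag so the list node
 * itself can be reused for something else once the entry is unhashed. */
#include <assert.h>

#define TOY_REQ_HASHED  (1u << 0)

struct toy_req {
        unsigned int flags;
        /* hash node / other storage would live here, possibly in a union */
};

#define TOY_ON_HASH(rq) ((rq)->flags & TOY_REQ_HASHED)

static void toy_hash_add(struct toy_req *rq)
{
        assert(!TOY_ON_HASH(rq));
        /* ... hash_add(...) ... */
        rq->flags |= TOY_REQ_HASHED;
}

static void toy_hash_del(struct toy_req *rq)
{
        /* ... hash_del(...) ... */
        rq->flags &= ~TOY_REQ_HASHED;
}

int main(void)
{
        struct toy_req rq = { 0 };

        toy_hash_add(&rq);
        assert(TOY_ON_HASH(&rq));
        toy_hash_del(&rq);
        assert(!TOY_ON_HASH(&rq));
        return 0;
}
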
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 66e8c3b94ef3..f70a230a2945 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -237,7 +237,7 @@ static int __do_lo_send_write(struct file *file,
         file_end_write(file);
         if (likely(bw == len))
                 return 0;
-        printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
+        printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
                         (unsigned long long)pos, len);
         if (bw >= 0)
                 bw = -EIO;
@@ -277,7 +277,7 @@ static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
                 return __do_lo_send_write(lo->lo_backing_file,
                                 page_address(page), bvec->bv_len,
                                 pos);
-        printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
+        printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
                         "length %i.\n", (unsigned long long)pos, bvec->bv_len);
         if (ret > 0)
                 ret = -EIO;
@@ -316,7 +316,7 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 out:
         return ret;
 fail:
-        printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
+        printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
         ret = -ENOMEM;
         goto out;
 }
@@ -345,7 +345,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                 size = p->bsize;
 
         if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
-                printk(KERN_ERR "loop: transfer error block %ld\n",
+                printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
                        page->index);
                 size = -EINVAL;
         }
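
Note: the loop.c hunks only swap printk() for printk_ratelimited() on error
paths driven by I/O failures, so a broken backing file cannot flood the kernel
log; the messages and return codes are unchanged. The real macro throttles by
time (a burst allowance per interval); the userspace sketch below is only a
crude stand-in that drops three out of every four messages, to show why an
error path hit once per request wants rate limiting at all:

/* Sketch only: a crude userspace stand-in for printk_ratelimited(); the real
 * macro limits bursts by time, this toy just logs one message in every four. */
#include <stdarg.h>
#include <stdio.h>

static void log_ratelimited(const char *fmt, ...)
{
        static unsigned int calls;
        va_list ap;

        if (calls++ % 4)        /* drop 3 out of every 4 messages */
                return;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

int main(void)
{
        /* A failing backing file can hit the error path once per request;
         * without rate limiting that floods the log. */
        for (int i = 0; i < 8; i++)
                log_ratelimited("loop: write error at byte offset %llu, length %i.\n",
                                (unsigned long long)(i * 4096), 4096);
        return 0;
}
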
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5681c05ac506..65a123d9c676 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -184,7 +184,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  */
 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                  int data_direction, void *buffer, unsigned bufflen,
-                 unsigned char *sense, int timeout, int retries, int flags,
+                 unsigned char *sense, int timeout, int retries, u64 flags,
                  int *resid)
 {
         struct request *req;
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(scsi_execute);
 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                            int data_direction, void *buffer, unsigned bufflen,
                            struct scsi_sense_hdr *sshdr, int timeout, int retries,
-                           int *resid, int flags)
+                           int *resid, u64 flags)
 {
         char *sense = NULL;
         int result;
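
Note: widening the flags parameter from int to u64 matters because request
flags are built as 1ULL << bit (cf. the REQ_HASHED definition further down)
and the flag space has outgrown 32 bits; passing such a flag through an int
silently drops the high bits before it ever reaches the request. A standalone
illustration of the truncation the prototype change avoids (toy names, not
the SCSI API):

/* Sketch only: shows why a 64-bit flag must not pass through an int. */
#include <stdint.h>
#include <stdio.h>

#define TOY_FLAG_HIGH   (1ULL << 40)    /* a flag above bit 31 */

static uint64_t pass_as_int(int flags)          /* old-style prototype */
{
        return (uint64_t)flags;
}

static uint64_t pass_as_u64(uint64_t flags)     /* widened prototype */
{
        return flags;
}

int main(void)
{
        printf("via int: %#llx\n",              /* high bits lost in the narrowing conversion */
               (unsigned long long)pass_as_int((int)TOY_FLAG_HIGH));
        printf("via u64: %#llx\n",              /* 0x10000000000 survives intact */
               (unsigned long long)pass_as_u64(TOY_FLAG_HIGH));
        return 0;
}
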
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 29696b78d1f4..1c2ce0c87711 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -182,6 +182,9 @@ static int bdev_integrity_enabled(struct block_device *bdev, int rw)
  */
 int bio_integrity_enabled(struct bio *bio)
 {
+        if (!bio_is_rw(bio))
+                return 0;
+
         /* Already protected? */
         if (bio_integrity(bio))
                 return 0;
@@ -309,10 +312,9 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
 {
         struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
         struct blk_integrity_exchg bix;
-        struct bio_vec bv;
-        struct bvec_iter iter;
+        struct bio_vec *bv;
         sector_t sector;
-        unsigned int sectors, ret = 0;
+        unsigned int sectors, ret = 0, i;
         void *prot_buf = bio->bi_integrity->bip_buf;
 
         if (operate)
@@ -323,16 +325,16 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
         bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
         bix.sector_size = bi->sector_size;
 
-        bio_for_each_segment(bv, bio, iter) {
-                void *kaddr = kmap_atomic(bv.bv_page);
-                bix.data_buf = kaddr + bv.bv_offset;
-                bix.data_size = bv.bv_len;
+        bio_for_each_segment_all(bv, bio, i) {
+                void *kaddr = kmap_atomic(bv->bv_page);
+                bix.data_buf = kaddr + bv->bv_offset;
+                bix.data_size = bv->bv_len;
                 bix.prot_buf = prot_buf;
                 bix.sector = sector;
 
-                if (operate) {
+                if (operate)
                         bi->generate_fn(&bix);
-                } else {
+                else {
                         ret = bi->verify_fn(&bix);
                         if (ret) {
                                 kunmap_atomic(kaddr);
@@ -340,7 +342,7 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
                         }
                 }
 
-                sectors = bv.bv_len / bi->sector_size;
+                sectors = bv->bv_len / bi->sector_size;
                 sector += sectors;
                 prot_buf += sectors * bi->tuple_size;
 
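
Note: two things change here. Integrity generation/verification now bails out
early for bios that carry no data payload (the bio_is_rw() check), and the
loop walks every biovec the bio owns via bio_for_each_segment_all() with a
plain index, instead of the iterator-based bio_for_each_segment(), which
starts from the bio's current bi_iter position. A toy sketch of that
distinction, with simplified stand-ins rather than the real bio:

/* Sketch only: contrast "walk everything by index" with "walk from the
 * current iterator position" using toy stand-ins for bio/biovec. */
#include <stdio.h>

struct toy_vec { int len; };

struct toy_bio {
        struct toy_vec vec[4];
        int vcnt;       /* like bi_vcnt: all segments */
        int iter_idx;   /* like bi_iter: where in-flight processing stands */
};

int main(void)
{
        struct toy_bio bio = {
                .vec = { {512}, {512}, {1024}, {512} },
                .vcnt = 4,
                .iter_idx = 2,  /* the bio has already been advanced */
        };
        int i, total;

        /* "for_each_segment_all" style: every segment, regardless of iterator */
        for (i = 0, total = 0; i < bio.vcnt; i++)
                total += bio.vec[i].len;
        printf("all segments: %d bytes\n", total);        /* 2560 */

        /* "for_each_segment" style: only what the iterator still covers */
        for (i = bio.iter_idx, total = 0; i < bio.vcnt; i++)
                total += bio.vec[i].len;
        printf("remaining segments: %d bytes\n", total);  /* 1536 */
        return 0;
}
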
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5a4d39b4686b..5aa372a7380c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -216,9 +216,9 @@ static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
 }
 
 #define for_each_bvec(bvl, bio_vec, iter, start)                        \
-        for ((iter) = start;                                            \
-             (bvl) = bvec_iter_bvec((bio_vec), (iter)),                 \
-             (iter).bi_size;                                            \
+        for (iter = (start);                                            \
+             (iter).bi_size &&                                          \
+                ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);         \
              bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
 
 
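
Note: the reordered for_each_bvec() condition tests (iter).bi_size before
loading a bvec, so an exhausted iterator is never dereferenced; the comma
expression ((bvl = ...), 1) keeps the assignment inside the condition without
affecting its truth value. A standalone sketch of the same loop shape applied
to a plain array, with invented toy_* names:

/* Sketch only: the "test the size first, then load the element" loop shape
 * used by the fixed for_each_bvec(), applied to a plain array. */
#include <stdio.h>

struct toy_iter { unsigned int idx, remaining; };

#define toy_fetch(arr, it)      ((arr)[(it).idx])

#define toy_for_each(val, arr, it)                                      \
        for ((it).idx = 0;                                              \
             (it).remaining &&                                          \
                (((val) = toy_fetch((arr), (it))), 1);                  \
             (it).idx++, (it).remaining--)

int main(void)
{
        int lens[] = { 512, 1024, 512 };
        struct toy_iter it = { .remaining = 3 };
        int len;

        /* toy_fetch() is only evaluated after "remaining" is known to be
         * non-zero, so the loop never reads past the end of the array. */
        toy_for_each(len, lens, it)
                printf("segment of %d bytes\n", len);
        return 0;
}
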
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index bbc3a6c88fce..aa0eaa2d0bd8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -189,6 +189,7 @@ enum rq_flag_bits {
         __REQ_KERNEL,           /* direct IO to kernel pages */
         __REQ_PM,               /* runtime pm request */
         __REQ_END,              /* last of chain of requests */
+        __REQ_HASHED,           /* on IO scheduler merge hash */
         __REQ_NR_BITS,          /* stops here */
 };
 
@@ -241,5 +242,6 @@ enum rq_flag_bits {
 #define REQ_KERNEL              (1ULL << __REQ_KERNEL)
 #define REQ_PM                  (1ULL << __REQ_PM)
 #define REQ_END                 (1ULL << __REQ_END)
+#define REQ_HASHED              (1ULL << __REQ_HASHED)
 
 #endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1e1fa3f93d5f..99617cf7dd1a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -118,7 +118,18 @@ struct request {
         struct bio *bio;
         struct bio *biotail;
 
-        struct hlist_node hash; /* merge hash */
+        /*
+         * The hash is used inside the scheduler, and killed once the
+         * request reaches the dispatch list. The ipi_list is only used
+         * to queue the request for softirq completion, which is long
+         * after the request has been unhashed (and even removed from
+         * the dispatch list).
+         */
+        union {
+                struct hlist_node hash; /* merge hash */
+                struct list_head ipi_list;
+        };
+
         /*
          * The rb_node is only used inside the io scheduler, requests
          * are pruned when moved to the dispatch queue. So let the
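
Note: the comment in the hunk spells out why the union is safe; a request sits
on the elevator's merge hash only before dispatch and on the softirq ipi_list
only at completion, so the two members are never live at the same time and
struct request does not grow. A compact standalone illustration of that
"non-overlapping lifetimes share storage" pattern with toy types (anonymous
unions need C11 or the GNU extension, as in the kernel):

/* Sketch only: two list linkages with disjoint lifetimes sharing storage,
 * mirroring the hash/ipi_list union added to struct request. */
#include <stdio.h>

struct toy_hlist_node { void *next, **pprev; };
struct toy_list_head  { void *next, *prev; };

struct toy_request {
        unsigned int flags;     /* e.g. a HASHED bit says which member is live */
        union {
                struct toy_hlist_node hash;     /* only before dispatch */
                struct toy_list_head ipi_list;  /* only at completion time */
        };
};

int main(void)
{
        /* The union costs max(sizeof members), not the sum of both. */
        printf("sizeof(toy_request) = %zu\n", sizeof(struct toy_request));
        printf("hash node: %zu bytes, list head: %zu bytes\n",
               sizeof(struct toy_hlist_node), sizeof(struct toy_list_head));
        return 0;
}
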
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 4e845b80efd3..5853c913d2b0 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -423,11 +423,11 @@ extern int scsi_is_target_device(const struct device *);
 extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                         int data_direction, void *buffer, unsigned bufflen,
                         unsigned char *sense, int timeout, int retries,
-                        int flag, int *resid);
+                        u64 flags, int *resid);
 extern int scsi_execute_req_flags(struct scsi_device *sdev,
         const unsigned char *cmd, int data_direction, void *buffer,
         unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
-        int retries, int *resid, int flags);
+        int retries, int *resid, u64 flags);
 static inline int scsi_execute_req(struct scsi_device *sdev,
         const unsigned char *cmd, int data_direction, void *buffer,
         unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,