author	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-04 14:16:35 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-02-04 14:16:35 -0500
commit	64b28683deba132f301d1cecfc25c32e295f53a1 (patch)
tree	be38a4e77c530fb129339f983a9d307c60312df8
parent	d3658c2266012f270da52e3e0365536e394bd3bd (diff)
parent	1d51877578799bfe0fcfe189d8233c9fccf05931 (diff)
Merge tag 'for-linus-20180204' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe:
 "Most of this is fixes and not new code/features:

  - skd fix from Arnd, fixing a build error dependent on slab allocator
    type.

  - blk-mq scheduler discard merging fixes, one from me and one from
    Keith. This fixes a segment miscalculation for blk-mq-sched, where
    we mistakenly think two segments are physically contiguous even
    though the request isn't carrying real data. Also fixes a bio-to-rq
    merge case.

  - Don't re-set a bit on the buffer_head flags if it's already set.
    This can cause scalability concerns on bigger machines and
    workloads. From Kemi Wang.

  - Add BLK_STS_DEV_RESOURCE return value to blk-mq, allowing us to
    distinguish between a local (device related) resource starvation
    and a global one. The latter might happen without IO being in
    flight, so it has to be handled a bit differently. From Ming"

* tag 'for-linus-20180204' of git://git.kernel.dk/linux-block:
  block: skd: fix incorrect linux/slab_def.h inclusion
  buffer: Avoid setting buffer bits that are already set
  blk-mq-sched: Enable merging discard bio into request
  blk-mq: fix discard merge with scheduler attached
  blk-mq: introduce BLK_STS_DEV_RESOURCE
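As a rough sketch of what the new return value means for drivers (illustrative only, not part of this series; the foo_* driver, its helpers, and the callback name are hypothetical), a blk-mq ->queue_rq() handler would return BLK_STS_DEV_RESOURCE when a device-private resource is exhausted and completion of in-flight IO is guaranteed to rerun the queue, and BLK_STS_RESOURCE when a system-wide allocation fails, in which case the block layer reruns the queue itself after a delay:

    static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
    {
            struct foo_queue *fq = hctx->driver_data;

            /* Device-private resource (e.g. a hardware descriptor slot):
             * completing outstanding IO frees one, so the queue is
             * guaranteed to be rerun and no delayed requeue is needed.
             */
            if (!foo_get_descriptor(fq))
                    return BLK_STS_DEV_RESOURCE;

            /* System-wide resource (e.g. a DMA mapping or kernel memory):
             * this can fail with no IO in flight, so the block layer must
             * rerun the queue after a delay instead.
             */
            if (foo_map_data(fq, bd->rq) < 0)
                    return BLK_STS_RESOURCE;

            foo_submit(fq, bd->rq);
            return BLK_STS_OK;
    }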
-rw-r--r--	block/blk-core.c	3
-rw-r--r--	block/blk-merge.c	29
-rw-r--r--	block/blk-mq-sched.c	2
-rw-r--r--	block/blk-mq.c	20
-rw-r--r--	drivers/block/null_blk.c	2
-rw-r--r--	drivers/block/skd_main.c	7
-rw-r--r--	drivers/block/virtio_blk.c	2
-rw-r--r--	drivers/block/xen-blkfront.c	2
-rw-r--r--	drivers/md/dm-rq.c	5
-rw-r--r--	drivers/nvme/host/fc.c	12
-rw-r--r--	drivers/scsi/scsi_lib.c	6
-rw-r--r--	include/linux/blk_types.h	18
-rw-r--r--	include/linux/buffer_head.h	5
13 files changed, 83 insertions(+), 30 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a2005a485335..d0d104268f1a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -145,6 +145,7 @@ static const struct {
 	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
+	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
 	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
 
 	/* device mapper special case, should not leak out: */
@@ -3282,6 +3283,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 {
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
+	else if (bio_op(bio) == REQ_OP_DISCARD)
+		rq->nr_phys_segments = 1;
 
 	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8452fc7164cc..782940c65d8a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -550,6 +550,24 @@ static bool req_no_special_merge(struct request *req)
 	return !q->mq_ops && req->special;
 }
 
+static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
+		struct request *next)
+{
+	unsigned short segments = blk_rq_nr_discard_segments(req);
+
+	if (segments >= queue_max_discard_segments(q))
+		goto no_merge;
+	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
+	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+		goto no_merge;
+
+	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
+	return true;
+no_merge:
+	req_set_nomerge(q, req);
+	return false;
+}
+
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
@@ -683,9 +701,13 @@ static struct request *attempt_merge(struct request_queue *q,
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
 	 * will have updated segment counts, update sector
-	 * counts here.
+	 * counts here. Handle DISCARDs separately, as they
+	 * have separate settings.
 	 */
-	if (!ll_merge_requests_fn(q, req, next))
+	if (req_op(req) == REQ_OP_DISCARD) {
+		if (!req_attempt_discard_merge(q, req, next))
+			return NULL;
+	} else if (!ll_merge_requests_fn(q, req, next))
 		return NULL;
 
 	/*
@@ -715,7 +737,8 @@ static struct request *attempt_merge(struct request_queue *q,
 
 	req->__data_len += blk_rq_bytes(next);
 
-	elv_merge_requests(q, req, next);
+	if (req_op(req) != REQ_OP_DISCARD)
+		elv_merge_requests(q, req, next);
 
 	/*
 	 * 'next' is going away, so update stats accordingly
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 55c0a745b427..25c14c58385c 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -259,6 +259,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 		if (!*merged_request)
 			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
 		return true;
+	case ELEVATOR_DISCARD_MERGE:
+		return bio_attempt_discard_merge(q, rq, bio);
 	default:
 		return false;
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 01f271d40825..df93102e2149 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1162,6 +1162,8 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 	return true;
 }
 
+#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
+
 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			     bool got_budget)
 {
@@ -1169,6 +1171,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 	struct request *rq, *nxt;
 	bool no_tag = false;
 	int errors, queued;
+	blk_status_t ret = BLK_STS_OK;
 
 	if (list_empty(list))
 		return false;
@@ -1181,7 +1184,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 	errors = queued = 0;
 	do {
 		struct blk_mq_queue_data bd;
-		blk_status_t ret;
 
 		rq = list_first_entry(list, struct request, queuelist);
 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -1226,7 +1228,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		}
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
-		if (ret == BLK_STS_RESOURCE) {
+		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
 			/*
 			 * If an I/O scheduler has been configured and we got a
 			 * driver tag for the next request already, free it
@@ -1257,6 +1259,8 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 	 * that is where we will continue on next queue run.
 	 */
 	if (!list_empty(list)) {
+		bool needs_restart;
+
 		spin_lock(&hctx->lock);
 		list_splice_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
@@ -1280,10 +1284,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		 * - Some but not all block drivers stop a queue before
 		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
 		 *   and dm-rq.
+		 *
+		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
+		 * bit is set, run queue after a delay to avoid IO stalls
+		 * that could otherwise occur if the queue is idle.
 		 */
-		if (!blk_mq_sched_needs_restart(hctx) ||
+		needs_restart = blk_mq_sched_needs_restart(hctx);
+		if (!needs_restart ||
 		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
 			blk_mq_run_hw_queue(hctx, true);
+		else if (needs_restart && (ret == BLK_STS_RESOURCE))
+			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
 	}
 
 	return (queued + errors) != 0;
@@ -1764,6 +1775,7 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
 		*cookie = new_cookie;
 		break;
 	case BLK_STS_RESOURCE:
+	case BLK_STS_DEV_RESOURCE:
 		__blk_mq_requeue_request(rq);
 		break;
 	default:
@@ -1826,7 +1838,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	hctx_lock(hctx, &srcu_idx);
 
 	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
-	if (ret == BLK_STS_RESOURCE)
+	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
 		blk_mq_sched_insert_request(rq, false, true, false);
 	else if (ret != BLK_STS_OK)
 		blk_mq_end_request(rq, ret);
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 6655893a3a7a..287a09611c0f 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1230,7 +1230,7 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 				return BLK_STS_OK;
 			} else
 				/* requeue request */
-				return BLK_STS_RESOURCE;
+				return BLK_STS_DEV_RESOURCE;
 		}
 	}
 
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index de0d08133c7e..e41935ab41ef 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -32,7 +32,6 @@
 #include <linux/aer.h>
 #include <linux/wait.h>
 #include <linux/stringify.h>
-#include <linux/slab_def.h>
 #include <scsi/scsi.h>
 #include <scsi/sg.h>
 #include <linux/io.h>
@@ -2603,7 +2602,8 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
 	buf = kmem_cache_alloc(s, gfp);
 	if (!buf)
 		return NULL;
-	*dma_handle = dma_map_single(dev, buf, s->size, dir);
+	*dma_handle = dma_map_single(dev, buf,
+				     kmem_cache_size(s), dir);
 	if (dma_mapping_error(dev, *dma_handle)) {
 		kmem_cache_free(s, buf);
 		buf = NULL;
@@ -2618,7 +2618,8 @@ static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
 	if (!vaddr)
 		return;
 
-	dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir);
+	dma_unmap_single(&skdev->pdev->dev, dma_handle,
+			 kmem_cache_size(s), dir);
 	kmem_cache_free(s, vaddr);
 }
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 68846897d213..79908e6ddbf2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -276,7 +276,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		/* Out of mem doesn't actually happen, since we fall back
 		 * to direct descriptors */
 		if (err == -ENOMEM || err == -ENOSPC)
-			return BLK_STS_RESOURCE;
+			return BLK_STS_DEV_RESOURCE;
 		return BLK_STS_IOERR;
 	}
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 891265acb10e..e126e4cac2ca 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -911,7 +911,7 @@ out_err:
 out_busy:
 	blk_mq_stop_hw_queue(hctx);
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-	return BLK_STS_RESOURCE;
+	return BLK_STS_DEV_RESOURCE;
 }
 
 static void blkif_complete_rq(struct request *rq)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index aeaaaef43eff..bf0b840645cc 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -408,7 +408,7 @@ static blk_status_t dm_dispatch_clone_request(struct request *clone, struct requ
 
 	clone->start_time = jiffies;
 	r = blk_insert_cloned_request(clone->q, clone);
-	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
+	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
 		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
 	return r;
@@ -500,7 +500,7 @@ check_again:
 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
 				     blk_rq_pos(rq));
 		ret = dm_dispatch_clone_request(clone, rq);
-		if (ret == BLK_STS_RESOURCE) {
+		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
 			blk_rq_unprep_clone(clone);
 			tio->ti->type->release_clone_rq(clone);
 			tio->clone = NULL;
@@ -772,7 +772,6 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		/* Undo dm_start_request() before requeuing */
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
-		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
 		return BLK_STS_RESOURCE;
 	}
 
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 99bf51c7e513..b856d7c919d2 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -35,8 +35,6 @@ enum nvme_fc_queue_flags {
 	NVME_FC_Q_LIVE,
 };
 
-#define NVMEFC_QUEUE_DELAY	3		/* ms units */
-
 #define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
 
 struct nvme_fc_queue {
@@ -2231,7 +2229,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	 * the target device is present
 	 */
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
-		goto busy;
+		return BLK_STS_RESOURCE;
 
 	if (!nvme_fc_ctrl_get(ctrl))
 		return BLK_STS_IOERR;
@@ -2311,16 +2309,10 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 		    ret != -EBUSY)
 			return BLK_STS_IOERR;
 
-		goto busy;
+		return BLK_STS_RESOURCE;
 	}
 
 	return BLK_STS_OK;
-
-busy:
-	if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
-		blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
-
-	return BLK_STS_RESOURCE;
 }
 
 static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8efe4731ed89..a86df9ca7d1c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2047,9 +2047,9 @@ out_put_budget:
 	case BLK_STS_OK:
 		break;
 	case BLK_STS_RESOURCE:
-		if (atomic_read(&sdev->device_busy) == 0 &&
-		    !scsi_device_blocked(sdev))
-			blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
+		if (atomic_read(&sdev->device_busy) ||
+		    scsi_device_blocked(sdev))
+			ret = BLK_STS_DEV_RESOURCE;
 		break;
 	default:
 		/*
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index c5d3db0d83f8..bf18b95ed92d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -39,6 +39,24 @@ typedef u8 __bitwise blk_status_t;
 
 #define BLK_STS_AGAIN		((__force blk_status_t)12)
 
+/*
+ * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
+ * device related resources are unavailable, but the driver can guarantee
+ * that the queue will be rerun in the future once resources become
+ * available again. This is typically the case for device specific
+ * resources that are consumed for IO. If the driver fails allocating these
+ * resources, we know that inflight (or pending) IO will free these
+ * resources upon completion.
+ *
+ * This is different from BLK_STS_RESOURCE in that it explicitly references
+ * a device specific resource. For resources of wider scope, allocation
+ * failure can happen without having pending IO. This means that we can't
+ * rely on request completions freeing these resources, as IO may not be in
+ * flight. Examples of that are kernel memory allocations, DMA mappings, or
+ * any other system wide resources.
+ */
+#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
+
 /**
  * blk_path_error - returns true if error may be path related
  * @error: status the request was completed with
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 58a82f58e44e..894e5d125de6 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -81,11 +81,14 @@ struct buffer_head {
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
  * and buffer_foo() functions.
+ * To avoid reset buffer flags that are already set, because that causes
+ * a costly cache line transition, check the flag first.
  */
 #define BUFFER_FNS(bit, name)						\
 static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
 {									\
-	set_bit(BH_##bit, &(bh)->b_state);				\
+	if (!test_bit(BH_##bit, &(bh)->b_state))			\
+		set_bit(BH_##bit, &(bh)->b_state);			\
 } \
 static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
 {									\