diff options
author     Linus Torvalds <torvalds@linux-foundation.org>   2016-07-07 18:34:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-07-07 18:34:09 -0400
commit     ac904ae6e6f0a56be7b9a1cf66fbd50dd025fb06 (patch)
tree       381a6ad5886a0da78392c2f5bb817dd5d75ea97f
parent     4c2a8499a450b6582eb5637a8f0d472168355ddd (diff)
parent     8ba8682107ee2ca3347354e018865d8e1967c5f4 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block IO fixes from Jens Axboe:
"Three small fixes that have been queued up and tested for this series:
- A bug fix for xen-blkfront from Bob Liu, fixing an issue with
incomplete requests during migration.
- A fix for an ancient issue in retrieving the IO priority of a
different PID than self, preventing that task from going away while
we access it. From Omar.
- A writeback fix from Tahsin, fixing a case where we'd call ihold()
with a zero ref count inode"
* 'for-linus' of git://git.kernel.dk/linux-block:
block: fix use-after-free in sys_ioprio_get()
writeback: inode cgroup wb switch should not call ihold()
xen-blkfront: save uncompleted reqs in blkfront_resume()
 block/ioprio.c               |  2
 drivers/block/xen-blkfront.c | 91
 fs/fs-writeback.c            |  2
 3 files changed, 43 insertions(+), 52 deletions(-)
diff --git a/block/ioprio.c b/block/ioprio.c
index cc7800e9eb44..01b8116298a1 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
150 | if (ret) | 150 | if (ret) |
151 | goto out; | 151 | goto out; |
152 | ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); | 152 | ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); |
153 | task_lock(p); | ||
153 | if (p->io_context) | 154 | if (p->io_context) |
154 | ret = p->io_context->ioprio; | 155 | ret = p->io_context->ioprio; |
156 | task_unlock(p); | ||
155 | out: | 157 | out: |
156 | return ret; | 158 | return ret; |
157 | } | 159 | } |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2e6d1e9c3345..fcc5b4e0aef2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -207,6 +207,9 @@ struct blkfront_info
207 | struct blk_mq_tag_set tag_set; | 207 | struct blk_mq_tag_set tag_set; |
208 | struct blkfront_ring_info *rinfo; | 208 | struct blkfront_ring_info *rinfo; |
209 | unsigned int nr_rings; | 209 | unsigned int nr_rings; |
210 | /* Save uncomplete reqs and bios for migration. */ | ||
211 | struct list_head requests; | ||
212 | struct bio_list bio_list; | ||
210 | }; | 213 | }; |
211 | 214 | ||
212 | static unsigned int nr_minors; | 215 | static unsigned int nr_minors; |
@@ -2002,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
2002 | { | 2005 | { |
2003 | unsigned int i, r_index; | 2006 | unsigned int i, r_index; |
2004 | struct request *req, *n; | 2007 | struct request *req, *n; |
2005 | struct blk_shadow *copy; | ||
2006 | int rc; | 2008 | int rc; |
2007 | struct bio *bio, *cloned_bio; | 2009 | struct bio *bio, *cloned_bio; |
2008 | struct bio_list bio_list, merge_bio; | ||
2009 | unsigned int segs, offset; | 2010 | unsigned int segs, offset; |
2010 | int pending, size; | 2011 | int pending, size; |
2011 | struct split_bio *split_bio; | 2012 | struct split_bio *split_bio; |
2012 | struct list_head requests; | ||
2013 | 2013 | ||
2014 | blkfront_gather_backend_features(info); | 2014 | blkfront_gather_backend_features(info); |
2015 | segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; | 2015 | segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; |
2016 | blk_queue_max_segments(info->rq, segs); | 2016 | blk_queue_max_segments(info->rq, segs); |
2017 | bio_list_init(&bio_list); | ||
2018 | INIT_LIST_HEAD(&requests); | ||
2019 | 2017 | ||
2020 | for (r_index = 0; r_index < info->nr_rings; r_index++) { | 2018 | for (r_index = 0; r_index < info->nr_rings; r_index++) { |
2021 | struct blkfront_ring_info *rinfo; | 2019 | struct blkfront_ring_info *rinfo = &info->rinfo[r_index]; |
2022 | |||
2023 | rinfo = &info->rinfo[r_index]; | ||
2024 | /* Stage 1: Make a safe copy of the shadow state. */ | ||
2025 | copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow), | ||
2026 | GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); | ||
2027 | if (!copy) | ||
2028 | return -ENOMEM; | ||
2029 | |||
2030 | /* Stage 2: Set up free list. */ | ||
2031 | memset(&rinfo->shadow, 0, sizeof(rinfo->shadow)); | ||
2032 | for (i = 0; i < BLK_RING_SIZE(info); i++) | ||
2033 | rinfo->shadow[i].req.u.rw.id = i+1; | ||
2034 | rinfo->shadow_free = rinfo->ring.req_prod_pvt; | ||
2035 | rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; | ||
2036 | 2020 | ||
2037 | rc = blkfront_setup_indirect(rinfo); | 2021 | rc = blkfront_setup_indirect(rinfo); |
2038 | if (rc) { | 2022 | if (rc) |
2039 | kfree(copy); | ||
2040 | return rc; | 2023 | return rc; |
2041 | } | ||
2042 | |||
2043 | for (i = 0; i < BLK_RING_SIZE(info); i++) { | ||
2044 | /* Not in use? */ | ||
2045 | if (!copy[i].request) | ||
2046 | continue; | ||
2047 | |||
2048 | /* | ||
2049 | * Get the bios in the request so we can re-queue them. | ||
2050 | */ | ||
2051 | if (copy[i].request->cmd_flags & | ||
2052 | (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { | ||
2053 | /* | ||
2054 | * Flush operations don't contain bios, so | ||
2055 | * we need to requeue the whole request | ||
2056 | */ | ||
2057 | list_add(©[i].request->queuelist, &requests); | ||
2058 | continue; | ||
2059 | } | ||
2060 | merge_bio.head = copy[i].request->bio; | ||
2061 | merge_bio.tail = copy[i].request->biotail; | ||
2062 | bio_list_merge(&bio_list, &merge_bio); | ||
2063 | copy[i].request->bio = NULL; | ||
2064 | blk_end_request_all(copy[i].request, 0); | ||
2065 | } | ||
2066 | |||
2067 | kfree(copy); | ||
2068 | } | 2024 | } |
2069 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | 2025 | xenbus_switch_state(info->xbdev, XenbusStateConnected); |
2070 | 2026 | ||
@@ -2079,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
2079 | kick_pending_request_queues(rinfo); | 2035 | kick_pending_request_queues(rinfo); |
2080 | } | 2036 | } |
2081 | 2037 | ||
2082 | list_for_each_entry_safe(req, n, &requests, queuelist) { | 2038 | list_for_each_entry_safe(req, n, &info->requests, queuelist) { |
2083 | /* Requeue pending requests (flush or discard) */ | 2039 | /* Requeue pending requests (flush or discard) */ |
2084 | list_del_init(&req->queuelist); | 2040 | list_del_init(&req->queuelist); |
2085 | BUG_ON(req->nr_phys_segments > segs); | 2041 | BUG_ON(req->nr_phys_segments > segs); |
@@ -2087,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
2087 | } | 2043 | } |
2088 | blk_mq_kick_requeue_list(info->rq); | 2044 | blk_mq_kick_requeue_list(info->rq); |
2089 | 2045 | ||
2090 | while ((bio = bio_list_pop(&bio_list)) != NULL) { | 2046 | while ((bio = bio_list_pop(&info->bio_list)) != NULL) { |
2091 | /* Traverse the list of pending bios and re-queue them */ | 2047 | /* Traverse the list of pending bios and re-queue them */ |
2092 | if (bio_segments(bio) > segs) { | 2048 | if (bio_segments(bio) > segs) { |
2093 | /* | 2049 | /* |
@@ -2133,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
2133 | { | 2089 | { |
2134 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); | 2090 | struct blkfront_info *info = dev_get_drvdata(&dev->dev); |
2135 | int err = 0; | 2091 | int err = 0; |
2092 | unsigned int i, j; | ||
2136 | 2093 | ||
2137 | dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); | 2094 | dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); |
2138 | 2095 | ||
2096 | bio_list_init(&info->bio_list); | ||
2097 | INIT_LIST_HEAD(&info->requests); | ||
2098 | for (i = 0; i < info->nr_rings; i++) { | ||
2099 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; | ||
2100 | struct bio_list merge_bio; | ||
2101 | struct blk_shadow *shadow = rinfo->shadow; | ||
2102 | |||
2103 | for (j = 0; j < BLK_RING_SIZE(info); j++) { | ||
2104 | /* Not in use? */ | ||
2105 | if (!shadow[j].request) | ||
2106 | continue; | ||
2107 | |||
2108 | /* | ||
2109 | * Get the bios in the request so we can re-queue them. | ||
2110 | */ | ||
2111 | if (shadow[j].request->cmd_flags & | ||
2112 | (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { | ||
2113 | /* | ||
2114 | * Flush operations don't contain bios, so | ||
2115 | * we need to requeue the whole request | ||
2116 | */ | ||
2117 | list_add(&shadow[j].request->queuelist, &info->requests); | ||
2118 | continue; | ||
2119 | } | ||
2120 | merge_bio.head = shadow[j].request->bio; | ||
2121 | merge_bio.tail = shadow[j].request->biotail; | ||
2122 | bio_list_merge(&info->bio_list, &merge_bio); | ||
2123 | shadow[j].request->bio = NULL; | ||
2124 | blk_mq_end_request(shadow[j].request, 0); | ||
2125 | } | ||
2126 | } | ||
2127 | |||
2139 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | 2128 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); |
2140 | 2129 | ||
2141 | err = negotiate_mq(info); | 2130 | err = negotiate_mq(info); |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 989a2cef6b76..fe7e83a45eff 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -483,9 +483,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
483 | goto out_free; | 483 | goto out_free; |
484 | } | 484 | } |
485 | inode->i_state |= I_WB_SWITCH; | 485 | inode->i_state |= I_WB_SWITCH; |
486 | __iget(inode); | ||
486 | spin_unlock(&inode->i_lock); | 487 | spin_unlock(&inode->i_lock); |
487 | 488 | ||
488 | ihold(inode); | ||
489 | isw->inode = inode; | 489 | isw->inode = inode; |
490 | 490 | ||
491 | atomic_inc(&isw_nr_in_flight); | 491 | atomic_inc(&isw_nr_in_flight); |