diff options
author:    Ilya Dryomov <idryomov@gmail.com>    2018-01-11 08:09:11 -0500
committer: Jens Axboe <axboe@kernel.dk>         2018-01-18 14:57:17 -0500
commit:    721c7fc701c71f693307d274d2b346a1ecd4a534 (patch)
tree:      02b8934cb55c6b00a533c536419e2d157d18917b
parent:    17534c6f2c065ad8e34ff6f013e5afaa90428512 (diff)
block: fail op_is_write() requests to read-only partitions
Regular block device writes go through blkdev_write_iter(), which does
bdev_read_only(), while zeroout/discard/etc requests are never checked,
both userspace- and kernel-triggered. Add a generic catch-all check to
generic_make_request_checks() to actually enforce ioctl(BLKROSET) and
set_disk_ro(), which is used by quite a few drivers for things like
snapshots, read-only backing files/images, etc.
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 block/blk-core.c | 56 ++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 38 insertions(+), 18 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 55f338020254..c21a16e9fdf9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2062,6 +2062,21 @@ static inline bool should_fail_request(struct hd_struct *part,
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+{
+	if (part->policy && op_is_write(bio_op(bio))) {
+		char b[BDEVNAME_SIZE];
+
+		printk(KERN_ERR
+		       "generic_make_request: Trying to write "
+		       "to read-only block-device %s (partno %d)\n",
+		       bio_devname(bio, b), part->partno);
+		return true;
+	}
+
+	return false;
+}
+
 /*
  * Remap block n of partition p to block n+start(p) of the disk.
  */
@@ -2070,27 +2085,28 @@ static inline int blk_partition_remap(struct bio *bio)
 	struct hd_struct *p;
 	int ret = 0;
 
+	rcu_read_lock();
+	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
+	if (unlikely(!p || should_fail_request(p, bio->bi_iter.bi_size) ||
+		     bio_check_ro(bio, p))) {
+		ret = -EIO;
+		goto out;
+	}
+
 	/*
 	 * Zone reset does not include bi_size so bio_sectors() is always 0.
 	 * Include a test for the reset op code and perform the remap if needed.
 	 */
-	if (!bio->bi_partno ||
-	    (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
-		return 0;
+	if (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET)
+		goto out;
 
-	rcu_read_lock();
-	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
-	if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
-		bio->bi_iter.bi_sector += p->start_sect;
-		bio->bi_partno = 0;
-		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
-				      bio->bi_iter.bi_sector - p->start_sect);
-	} else {
-		printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
-		ret = -EIO;
-	}
-	rcu_read_unlock();
+	bio->bi_iter.bi_sector += p->start_sect;
+	bio->bi_partno = 0;
+	trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+			      bio->bi_iter.bi_sector - p->start_sect);
 
+out:
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -2149,15 +2165,19 @@ generic_make_request_checks(struct bio *bio)
 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 	 * if queue is not a request based queue.
 	 */
-
 	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
 		goto not_supported;
 
 	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
 		goto end_io;
 
-	if (blk_partition_remap(bio))
-		goto end_io;
+	if (!bio->bi_partno) {
+		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
+			goto end_io;
+	} else {
+		if (blk_partition_remap(bio))
+			goto end_io;
+	}
 
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;