author     Tejun Heo <tj@kernel.org>  2015-12-07 10:09:03 -0500
committer  Tejun Heo <tj@kernel.org>  2015-12-07 10:09:03 -0500
commit     0b98f0c04245877ae0b625a7f0aa55b8ff98e0c4 (patch)
tree       486ebe0d76217a4f7781e28fbd96facb0b66f9da /block
parent     67cde9c4938945b9510730c64e68d2f1dd7bc0aa (diff)
parent     527e9316f8ec44bd53d90fb9f611fa7ffff52bb9 (diff)
Merge branch 'master' into for-4.4-fixes
The following commit, which went into mainline through the networking
tree,
  3b13758f51de ("cgroups: Allow dynamically changing net_classid")
conflicts in net/core/netclassid_cgroup.c with the following pending
fix in cgroup/for-4.4-fixes:
  1f7dd3e5a6e4 ("cgroup: fix handling of multi-destination migration from subtree_control enabling")
The former separates out update_classid() from cgrp_attach() and
updates it to walk all fds of all tasks in the target css so that it
can be used from both migration and config change paths. The latter
drops @css from cgrp_attach().
Resolve the conflict by making cgrp_attach() call update_classid()
with the css from the first task. We could revive @tset walking in
cgrp_attach(), but since net_cls is v1-only and there is always only
one target css during migration, this is fine.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Nina Schiff <ninasc@fb.com>
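
For reference, the resolution described above amounts to cgrp_attach() fetching the destination css from the first task in the taskset and handing it to the css-walking update_classid(). The sketch below is a hedged reconstruction from this commit message, not the verbatim resolved net/core/netclassid_cgroup.c; it assumes the post-1f7dd3e5a6e4 cgroup_taskset_first(tset, &css) signature and the update_classid()/css_cls_state() helpers from 3b13758f51de.

/* Rough shape of the resolved cgrp_attach() -- reconstruction, not verbatim. */
static void cgrp_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;

	/* net_cls is v1-only, so every task in @tset shares one target css */
	cgroup_taskset_first(tset, &css);
	/* walk all open fds of all tasks in @css and refresh their classid */
	update_classid(css,
		       (void *)(unsigned long)css_cls_state(css)->classid);
}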
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c           | 21
-rw-r--r--  block/blk-merge.c          | 35
-rw-r--r--  block/blk-mq.c             | 14
-rw-r--r--  block/blk-settings.c       | 36
-rw-r--r--  block/blk-sysfs.c          |  3
-rw-r--r--  block/blk-timeout.c        |  8
-rw-r--r--  block/blk.h                |  2
-rw-r--r--  block/noop-iosched.c       | 10
-rw-r--r--  block/partition-generic.c  |  2
-rw-r--r--  block/partitions/mac.c     | 10
10 files changed, 83 insertions, 58 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 5131993b23a1..a0af4043dda2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2114,7 +2114,8 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
 EXPORT_SYMBOL(submit_bio);
 
 /**
- * blk_rq_check_limits - Helper function to check a request for the queue limit
+ * blk_cloned_rq_check_limits - Helper function to check a cloned request
+ * for new the queue limits
  * @q: the queue
  * @rq: the request being checked
  *
@@ -2125,20 +2126,13 @@ EXPORT_SYMBOL(submit_bio);
  * after it is inserted to @q, it should be checked against @q before
  * the insertion using this generic function.
  *
- * This function should also be useful for request stacking drivers
- * in some cases below, so export this function.
  * Request stacking drivers like request-based dm may change the queue
- * limits while requests are in the queue (e.g. dm's table swapping).
- * Such request stacking drivers should check those requests against
- * the new queue limits again when they dispatch those requests,
- * although such checkings are also done against the old queue limits
- * when submitting requests.
+ * limits when retrying requests on other queues. Those requests need
+ * to be checked against the new queue limits again during dispatch.
  */
-int blk_rq_check_limits(struct request_queue *q, struct request *rq)
+static int blk_cloned_rq_check_limits(struct request_queue *q,
+				      struct request *rq)
 {
-	if (!rq_mergeable(rq))
-		return 0;
-
 	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
@@ -2158,7 +2152,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(blk_rq_check_limits);
 
 /**
  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
@@ -2170,7 +2163,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	unsigned long flags;
 	int where = ELEVATOR_INSERT_BACK;
 
-	if (blk_rq_check_limits(q, rq))
+	if (blk_cloned_rq_check_limits(q, rq))
 		return -EIO;
 
 	if (rq->rq_disk &&
diff --git a/block/blk-merge.c b/block/blk-merge.c
index de5716d8e525..e01405a3e8b3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned seg_size = 0, nsegs = 0, sectors = 0;
+	unsigned front_seg_size = bio->bi_seg_front_size;
+	bool do_split = true;
+	struct bio *new = NULL;
 
 	bio_for_each_segment(bv, bio, iter) {
 		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
@@ -98,8 +101,11 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
-			bvprvp = &bv;
+			bvprvp = &bvprv;
 			sectors += bv.bv_len >> 9;
+
+			if (nsegs == 1 && seg_size > front_seg_size)
+				front_seg_size = seg_size;
 			continue;
 		}
 new_segment:
@@ -108,16 +114,29 @@ new_segment:
 
 		nsegs++;
 		bvprv = bv;
-		bvprvp = &bv;
+		bvprvp = &bvprv;
 		seg_size = bv.bv_len;
 		sectors += bv.bv_len >> 9;
+
+		if (nsegs == 1 && seg_size > front_seg_size)
+			front_seg_size = seg_size;
 	}
 
-	*segs = nsegs;
-	return NULL;
+	do_split = false;
 split:
 	*segs = nsegs;
-	return bio_split(bio, sectors, GFP_NOIO, bs);
+
+	if (do_split) {
+		new = bio_split(bio, sectors, GFP_NOIO, bs);
+		if (new)
+			bio = new;
+	}
+
+	bio->bi_seg_front_size = front_seg_size;
+	if (seg_size > bio->bi_seg_back_size)
+		bio->bi_seg_back_size = seg_size;
+
+	return do_split ? new : NULL;
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
@@ -412,6 +431,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	if (sg)
 		sg_mark_end(sg);
 
+	/*
+	 * Something must have been wrong if the figured number of
+	 * segment is bigger than number of req's physical segments
+	 */
+	WARN_ON(nsegs > rq->nr_phys_segments);
+
 	return nsegs;
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ae09de62f19..6d6f8feb48c0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_bio_to_request(rq, bio);
 
 		/*
-		 * we do limited pluging. If bio can be merged, do merge.
+		 * We do limited pluging. If the bio can be merged, do that.
 		 * Otherwise the existing request in the plug list will be
 		 * issued. So the plug list will have one request at most
 		 */
 		if (plug) {
 			/*
 			 * The plug list might get flushed before this. If that
-			 * happens, same_queue_rq is invalid and plug list is empty
-			 **/
+			 * happens, same_queue_rq is invalid and plug list is
+			 * empty
+			 */
 			if (same_queue_rq && !list_empty(&plug->mq_list)) {
 				old_rq = same_queue_rq;
 				list_del_init(&old_rq->queuelist);
@@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_bio_to_request(rq, bio);
 		if (!request_count)
 			trace_block_plug(q);
-		else if (request_count >= BLK_MAX_REQUEST_COUNT) {
+
+		blk_mq_put_ctx(data.ctx);
+
+		if (request_count >= BLK_MAX_REQUEST_COUNT) {
 			blk_flush_plug_list(plug, false);
 			trace_block_plug(q);
 		}
+
 		list_add_tail(&rq->queuelist, &plug->mq_list);
-		blk_mq_put_ctx(data.ctx);
 		return cookie;
 	}
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 7d8f129a1516..dd4973583978 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
+		BLK_SAFE_MAX_SECTORS;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	lim->max_hw_sectors = UINT_MAX;
 	lim->max_segment_size = UINT_MAX;
 	lim->max_sectors = UINT_MAX;
+	lim->max_dev_sectors = UINT_MAX;
 	lim->max_write_same_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
- * @limits: the queue limits
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
  * @max_hw_sectors: max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * the device driver based upon the capabilities of the I/O
  * controller.
  *
+ * max_dev_sectors is a hard limit imposed by the storage device for
+ * READ/WRITE requests. It is set by the disk driver.
+ *
  * max_sectors is a soft limit imposed by the block layer for
  * filesystem type requests. This value can be overridden on a
  * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  * The soft limit can not exceed max_hw_sectors.
  **/
-void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
+	struct queue_limits *limits = &q->limits;
+	unsigned int max_sectors;
+
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
 	}
 
 	limits->max_hw_sectors = max_hw_sectors;
-	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
-				    BLK_DEF_MAX_SECTORS);
-}
-EXPORT_SYMBOL(blk_limits_max_hw_sectors);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
- * @max_hw_sectors: max hardware sectors in the usual 512b unit
- *
- * Description:
- *    See description for blk_limits_max_hw_sectors().
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
-	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
+	limits->max_sectors = max_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
 	t->max_write_same_sectors = min(t->max_write_same_sectors,
 					b->max_write_same_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
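
A side note on the blk-settings.c hunks above: min_not_zero() treats a zero max_dev_sectors as "no device-imposed limit", so only drivers that actually set the new field see their soft limit clamped by it. The small standalone snippet below mirrors that clamping rule outside the kernel; it is illustrative only, and the BLK_DEF_MAX_SECTORS value is an assumption for the example.

#include <stdio.h>

/* Standalone illustration of the clamping rule used above:
 * a zero device limit means "unset" and must not clamp the result.
 */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

#define BLK_DEF_MAX_SECTORS 2560u	/* assumed value, for illustration */

static unsigned int derive_max_sectors(unsigned int max_hw_sectors,
				       unsigned int max_dev_sectors)
{
	unsigned int max_sectors = min_not_zero(max_hw_sectors, max_dev_sectors);

	return max_sectors < BLK_DEF_MAX_SECTORS ? max_sectors : BLK_DEF_MAX_SECTORS;
}

int main(void)
{
	/* no device limit set: only the hw limit and the block-layer default apply */
	printf("%u\n", derive_max_sectors(65535, 0));	/* prints 2560 */
	/* device limit smaller than both: it wins */
	printf("%u\n", derive_max_sectors(65535, 1024));	/* prints 1024 */
	return 0;
}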
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 565b8dac5782..e140cc487ce1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
+	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
+					 q->limits.max_dev_sectors >> 1);
+
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
 
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 246dfb16c3d9..aa40aa93381b 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -158,11 +158,13 @@ void blk_abort_request(struct request *req)
 {
 	if (blk_mark_rq_complete(req))
 		return;
-	blk_delete_timer(req);
-	if (req->q->mq_ops)
+
+	if (req->q->mq_ops) {
 		blk_mq_rq_timed_out(req, false);
-	else
+	} else {
+		blk_delete_timer(req);
 		blk_rq_timed_out(req);
+	}
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
diff --git a/block/blk.h b/block/blk.h
index da722eb786df..c43926d3d74d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
 			    unsigned int nr_bytes, unsigned int bidi_bytes);
-int blk_queue_enter(struct request_queue *q, gfp_t gfp);
-void blk_queue_exit(struct request_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 
 static inline void blk_queue_enter_live(struct request_queue *q)
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 3de89d4690f3..a163c487cf38 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq,
 static int noop_dispatch(struct request_queue *q, int force)
 {
 	struct noop_data *nd = q->elevator->elevator_data;
+	struct request *rq;
 
-	if (!list_empty(&nd->queue)) {
-		struct request *rq;
-		rq = list_entry(nd->queue.next, struct request, queuelist);
+	rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
+	if (rq) {
 		list_del_init(&rq->queuelist);
 		elv_dispatch_sort(q, rq);
 		return 1;
@@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq)
 
 	if (rq->queuelist.prev == &nd->queue)
 		return NULL;
-	return list_entry(rq->queuelist.prev, struct request, queuelist);
+	return list_prev_entry(rq, queuelist);
 }
 
 static struct request *
@@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq)
 
 	if (rq->queuelist.next == &nd->queue)
 		return NULL;
-	return list_entry(rq->queuelist.next, struct request, queuelist);
+	return list_next_entry(rq, queuelist);
 }
 
 static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 3b030157ec85..746935a5973c 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -397,7 +397,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
 	struct hd_struct *part;
 	int res;
 
-	if (bdev->bd_part_count)
+	if (bdev->bd_part_count || bdev->bd_super)
 		return -EBUSY;
 	res = invalidate_partition(disk, 0);
 	if (res)
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index c2c48ec64b27..621317ac4d59 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
 	Sector sect;
 	unsigned char *data;
 	int slot, blocks_in_map;
-	unsigned secsize;
+	unsigned secsize, datasize, partoffset;
 #ifdef CONFIG_PPC_PMAC
 	int found_root = 0;
 	int found_root_goodness = 0;
@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
 	}
 	secsize = be16_to_cpu(md->block_size);
 	put_dev_sector(sect);
-	data = read_part_sector(state, secsize/512, &sect);
+	datasize = round_down(secsize, 512);
+	data = read_part_sector(state, datasize / 512, &sect);
 	if (!data)
 		return -1;
-	part = (struct mac_partition *) (data + secsize%512);
+	partoffset = secsize % 512;
+	if (partoffset + sizeof(*part) > datasize)
+		return -1;
+	part = (struct mac_partition *) (data + partoffset);
 	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
 		put_dev_sector(sect);
 		return 0; /* not a MacOS disk */