Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c        |   4
-rw-r--r--  block/blk-core.c          | 265
-rw-r--r--  block/cfq-iosched.c       |   7
-rw-r--r--  block/deadline-iosched.c  |   2
-rw-r--r--  block/elevator.c          |  26
-rw-r--r--  block/partitions/efi.c    |   4
6 files changed, 229 insertions(+), 79 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b2b9837f9dd3..e8918ffaf96d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -972,10 +972,10 @@ int blkcg_activate_policy(struct request_queue *q,
         if (!new_blkg)
                 return -ENOMEM;
 
-        preloaded = !radix_tree_preload(GFP_KERNEL);
-
         blk_queue_bypass_start(q);
 
+        preloaded = !radix_tree_preload(GFP_KERNEL);
+
         /*
          * Make sure the root blkg exists and count the existing blkgs. As
          * @q is bypassing at this point, blkg_lookup_create() can't be
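
The hunk above reorders blkcg_activate_policy() so that radix_tree_preload() runs after blk_queue_bypass_start() instead of before it. The likely motivation (not stated in the diff itself) is that radix_tree_preload() returns with preemption disabled on success, while blk_queue_bypass_start() may block while draining the queue, so no sleeping call is allowed between the preload and the matching radix_tree_preload_end(). A minimal sketch of that preload pattern follows; example_insert and example_lock are illustrative names, not the blkcg code itself.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_SPINLOCK(example_lock);

static int example_insert(struct radix_tree_root *root,
                          unsigned long index, void *item)
{
        int err;

        /*
         * May allocate and therefore sleep; do all sleeping work before
         * this point.  On success it returns with preemption disabled.
         */
        err = radix_tree_preload(GFP_KERNEL);
        if (err)
                return err;

        spin_lock(&example_lock);          /* atomic context is fine now */
        err = radix_tree_insert(root, index, item);
        spin_unlock(&example_lock);

        radix_tree_preload_end();          /* re-enables preemption */
        return err;
}
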
diff --git a/block/blk-core.c b/block/blk-core.c
index 7c288358a745..33c33bc99ddd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -30,6 +30,7 @@
 #include <linux/list_sort.h>
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -159,20 +160,10 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
         else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                 error = -EIO;
 
-        if (unlikely(nbytes > bio->bi_size)) {
-                printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-                       __func__, nbytes, bio->bi_size);
-                nbytes = bio->bi_size;
-        }
-
         if (unlikely(rq->cmd_flags & REQ_QUIET))
                 set_bit(BIO_QUIET, &bio->bi_flags);
 
-        bio->bi_size -= nbytes;
-        bio->bi_sector += (nbytes >> 9);
-
-        if (bio_integrity(bio))
-                bio_integrity_advance(bio, nbytes);
+        bio_advance(bio, nbytes);
 
         /* don't actually finish bio if it's part of flush sequence */
         if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
@@ -1264,6 +1255,16 @@ void part_round_stats(int cpu, struct hd_struct *part)
 }
 EXPORT_SYMBOL_GPL(part_round_stats);
 
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_put_request(struct request *rq)
+{
+        if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+                pm_runtime_mark_last_busy(rq->q->dev);
+}
+#else
+static inline void blk_pm_put_request(struct request *rq) {}
+#endif
+
 /*
  * queue lock must be held
  */
@@ -1274,6 +1275,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
         if (unlikely(--req->ref_count))
                 return;
 
+        blk_pm_put_request(req);
+
         elv_completed_request(q, req);
 
         /* this is a bio leak */
@@ -1597,7 +1600,7 @@ static void handle_bad_sector(struct bio *bio)
         printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
                         bdevname(bio->bi_bdev, b),
                         bio->bi_rw,
-                        (unsigned long long)bio->bi_sector + bio_sectors(bio),
+                        (unsigned long long)bio_end_sector(bio),
                         (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 
         set_bit(BIO_EOF, &bio->bi_flags);
@@ -2053,6 +2056,28 @@ static void blk_account_io_done(struct request *req)
         }
 }
 
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Don't process normal requests when queue is suspended
+ * or in the process of suspending/resuming
+ */
+static struct request *blk_pm_peek_request(struct request_queue *q,
+                                           struct request *rq)
+{
+        if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+            (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+                return NULL;
+        else
+                return rq;
+}
+#else
+static inline struct request *blk_pm_peek_request(struct request_queue *q,
+                                                  struct request *rq)
+{
+        return rq;
+}
+#endif
+
 /**
  * blk_peek_request - peek at the top of a request queue
  * @q: request queue to peek at
@@ -2075,6 +2100,11 @@ struct request *blk_peek_request(struct request_queue *q)
         int ret;
 
         while ((rq = __elv_next_request(q)) != NULL) {
+
+                rq = blk_pm_peek_request(q, rq);
+                if (!rq)
+                        break;
+
                 if (!(rq->cmd_flags & REQ_STARTED)) {
                         /*
                          * This is the first time the device driver
@@ -2253,8 +2283,7 @@ EXPORT_SYMBOL(blk_fetch_request);
  **/
 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 {
-        int total_bytes, bio_nbytes, next_idx = 0;
-        struct bio *bio;
+        int total_bytes;
 
         if (!req->bio)
                 return false;
@@ -2300,56 +2329,21 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 
         blk_account_io_completion(req, nr_bytes);
 
-        total_bytes = bio_nbytes = 0;
-        while ((bio = req->bio) != NULL) {
-                int nbytes;
+        total_bytes = 0;
+        while (req->bio) {
+                struct bio *bio = req->bio;
+                unsigned bio_bytes = min(bio->bi_size, nr_bytes);
 
-                if (nr_bytes >= bio->bi_size) {
+                if (bio_bytes == bio->bi_size)
                         req->bio = bio->bi_next;
-                        nbytes = bio->bi_size;
-                        req_bio_endio(req, bio, nbytes, error);
-                        next_idx = 0;
-                        bio_nbytes = 0;
-                } else {
-                        int idx = bio->bi_idx + next_idx;
 
-                        if (unlikely(idx >= bio->bi_vcnt)) {
-                                blk_dump_rq_flags(req, "__end_that");
-                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-                                       __func__, idx, bio->bi_vcnt);
-                                break;
-                        }
+                req_bio_endio(req, bio, bio_bytes, error);
 
-                        nbytes = bio_iovec_idx(bio, idx)->bv_len;
-                        BIO_BUG_ON(nbytes > bio->bi_size);
+                total_bytes += bio_bytes;
+                nr_bytes -= bio_bytes;
 
-                        /*
-                         * not a complete bvec done
-                         */
-                        if (unlikely(nbytes > nr_bytes)) {
-                                bio_nbytes += nr_bytes;
-                                total_bytes += nr_bytes;
-                                break;
-                        }
-
-                        /*
-                         * advance to the next vector
-                         */
-                        next_idx++;
-                        bio_nbytes += nbytes;
-                }
-
-                total_bytes += nbytes;
-                nr_bytes -= nbytes;
-
-                bio = req->bio;
-                if (bio) {
-                        /*
-                         * end more in this run, or just return 'not-done'
-                         */
-                        if (unlikely(nr_bytes <= 0))
-                                break;
-                }
+                if (!nr_bytes)
+                        break;
         }
 
         /*
@@ -2365,16 +2359,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
                 return false;
         }
 
-        /*
-         * if the request wasn't completed, update state
-         */
-        if (bio_nbytes) {
-                req_bio_endio(req, bio, bio_nbytes, error);
-                bio->bi_idx += next_idx;
-                bio_iovec(bio)->bv_offset += nr_bytes;
-                bio_iovec(bio)->bv_len -= nr_bytes;
-        }
-
         req->__data_len -= total_bytes;
         req->buffer = bio_data(req->bio);
 
@@ -3046,6 +3030,149 @@ void blk_finish_plug(struct blk_plug *plug)
 }
 EXPORT_SYMBOL(blk_finish_plug);
 
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ *    Initialize runtime-PM-related fields for @q and start auto suspend for
+ *    @dev. Drivers that want to take advantage of request-based runtime PM
+ *    should call this function after @dev has been initialized, and its
+ *    request queue @q has been allocated, and runtime PM for it can not happen
+ *    yet(either due to disabled/forbidden or its usage_count > 0). In most
+ *    cases, driver should call this function before any I/O has taken place.
+ *
+ *    This function takes care of setting up using auto suspend for the device,
+ *    the autosuspend delay is set to -1 to make runtime suspend impossible
+ *    until an updated value is either set by user or by driver. Drivers do
+ *    not need to touch other autosuspend settings.
+ *
+ *    The block layer runtime PM is request based, so only works for drivers
+ *    that use request as their IO unit instead of those directly use bio's.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+        q->dev = dev;
+        q->rpm_status = RPM_ACTIVE;
+        pm_runtime_set_autosuspend_delay(q->dev, -1);
+        pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
+
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ *    This function will check if runtime suspend is allowed for the device
+ *    by examining if there are any requests pending in the queue. If there
+ *    are requests pending, the device can not be runtime suspended; otherwise,
+ *    the queue's status will be updated to SUSPENDING and the driver can
+ *    proceed to suspend the device.
+ *
+ *    For the not allowed case, we mark last busy for the device so that
+ *    runtime PM core will try to autosuspend it some time later.
+ *
+ *    This function should be called near the start of the device's
+ *    runtime_suspend callback.
+ *
+ * Return:
+ *    0      - OK to runtime suspend the device
+ *    -EBUSY - Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+        int ret = 0;
+
+        spin_lock_irq(q->queue_lock);
+        if (q->nr_pending) {
+                ret = -EBUSY;
+                pm_runtime_mark_last_busy(q->dev);
+        } else {
+                q->rpm_status = RPM_SUSPENDING;
+        }
+        spin_unlock_irq(q->queue_lock);
+        return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ *    Update the queue's runtime status according to the return value of the
+ *    device's runtime suspend function and mark last busy for the device so
+ *    that PM core will try to auto suspend the device at a later time.
+ *
+ *    This function should be called near the end of the device's
+ *    runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+        spin_lock_irq(q->queue_lock);
+        if (!err) {
+                q->rpm_status = RPM_SUSPENDED;
+        } else {
+                q->rpm_status = RPM_ACTIVE;
+                pm_runtime_mark_last_busy(q->dev);
+        }
+        spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
+
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ *    Update the queue's runtime status to RESUMING in preparation for the
+ *    runtime resume of the device.
+ *
+ *    This function should be called near the start of the device's
+ *    runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+        spin_lock_irq(q->queue_lock);
+        q->rpm_status = RPM_RESUMING;
+        spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ *    Update the queue's runtime status according to the return value of the
+ *    device's runtime_resume function. If it is successfully resumed, process
+ *    the requests that are queued into the device's queue when it is resuming
+ *    and then mark last busy and initiate autosuspend for it.
+ *
+ *    This function should be called near the end of the device's
+ *    runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+        spin_lock_irq(q->queue_lock);
+        if (!err) {
+                q->rpm_status = RPM_ACTIVE;
+                __blk_run_queue(q);
+                pm_runtime_mark_last_busy(q->dev);
+                pm_runtime_autosuspend(q->dev);
+        } else {
+                q->rpm_status = RPM_SUSPENDED;
+        }
+        spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+#endif
+
 int __init blk_dev_init(void)
 {
         BUILD_BUG_ON(__REQ_NR_BITS > 8 *
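
Taken together, the new exports above are meant to be wired into a driver's runtime PM callbacks, as the kerneldoc describes. Below is a minimal sketch, not taken from this patch or from any in-tree driver: the mydrv_* functions, dev_to_my_queue(), mydev_quiesce() and mydev_wake() are placeholder names, and the 5000 ms autosuspend delay is an arbitrary example value. Only the blk_pm_runtime_init()/blk_pre_runtime_suspend()/blk_post_runtime_suspend()/blk_pre_runtime_resume()/blk_post_runtime_resume() calls and the pm_runtime_* helpers are real interfaces.

#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver hooks; mydev_quiesce()/mydev_wake() stand in for
 * whatever hardware-specific work a real driver does, and
 * dev_to_my_queue() for however the driver finds its request queue. */

static int mydrv_runtime_suspend(struct device *dev)
{
        struct request_queue *q = dev_to_my_queue(dev);
        int err;

        err = blk_pre_runtime_suspend(q);       /* -EBUSY if requests are pending */
        if (err)
                return err;

        err = mydev_quiesce(dev);               /* put the hardware to sleep */
        blk_post_runtime_suspend(q, err);       /* RPM_SUSPENDED on success */
        return err;
}

static int mydrv_runtime_resume(struct device *dev)
{
        struct request_queue *q = dev_to_my_queue(dev);
        int err;

        blk_pre_runtime_resume(q);              /* mark the queue RPM_RESUMING */
        err = mydev_wake(dev);                  /* wake the hardware */
        blk_post_runtime_resume(q, err);        /* rerun queue, re-arm autosuspend */
        return err;
}

/* At probe time, after the queue exists and before any I/O starts: */
static void mydrv_setup_rpm(struct request_queue *q, struct device *dev)
{
        blk_pm_runtime_init(q, dev);
        /* the delay starts at -1 (suspend disabled); enable it explicitly */
        pm_runtime_set_autosuspend_delay(dev, 5000);
        pm_runtime_allow(dev);
}

The REQ_PM flag on the requests a driver issues while suspending or resuming is what lets them bypass the nr_pending accounting and the blk_pm_peek_request() gating added earlier in this patch.
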
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4f0ade74cfd0..d5cd3131c57a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2270,11 +2270,8 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
                 return NULL;
 
         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-        if (cfqq) {
-                sector_t sector = bio->bi_sector + bio_sectors(bio);
-
-                return elv_rb_find(&cfqq->sort_list, sector);
-        }
+        if (cfqq)
+                return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
 
         return NULL;
 }
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 90037b5eb17f..ba19a3afab79 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -132,7 +132,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
          * check for front merge
          */
         if (dd->front_merges) {
-                sector_t sector = bio->bi_sector + bio_sectors(bio);
+                sector_t sector = bio_end_sector(bio);
 
                 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
                 if (__rq) {
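
Both the cfq-iosched.c and deadline-iosched.c hunks replace the open-coded end-sector computation with bio_end_sector(), a helper introduced around the same time. In the struct bio layout of this era (before bi_sector moved into bi_iter) the helper is simply the expression it replaces:

#define bio_end_sector(bio)     ((bio)->bi_sector + bio_sectors(bio))

The elevators use it to look up a front-merge candidate: a queued request whose start sector equals the incoming bio's end sector.
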
diff --git a/block/elevator.c b/block/elevator.c
index a0ffdd943c98..eba5b04c29b1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -34,6 +34,7 @@
 #include <linux/blktrace_api.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 
 #include <trace/events/block.h>
 
@@ -536,6 +537,27 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
                 e->type->ops.elevator_bio_merged_fn(q, rq, bio);
 }
 
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_requeue_request(struct request *rq)
+{
+        if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+                rq->q->nr_pending--;
+}
+
+static void blk_pm_add_request(struct request_queue *q, struct request *rq)
+{
+        if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+            (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
+                pm_request_resume(q->dev);
+}
+#else
+static inline void blk_pm_requeue_request(struct request *rq) {}
+static inline void blk_pm_add_request(struct request_queue *q,
+                                      struct request *rq)
+{
+}
+#endif
+
 void elv_requeue_request(struct request_queue *q, struct request *rq)
 {
         /*
@@ -550,6 +572,8 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 
         rq->cmd_flags &= ~REQ_STARTED;
 
+        blk_pm_requeue_request(rq);
+
         __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
@@ -572,6 +596,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
         trace_block_rq_insert(q, rq);
 
+        blk_pm_add_request(q, rq);
+
         rq->q = q;
 
         if (rq->cmd_flags & REQ_SOFTBARRIER) {
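
The elevator hooks above pair with blk_pm_put_request() and blk_pm_peek_request() from the blk-core.c part of this patch to keep q->nr_pending in step with the requests a driver still owes. A rough outline of that lifecycle, written as a comment-only sketch rather than kernel code (requests carrying REQ_PM are exempt so the suspend/resume commands themselves can still pass through a suspended queue):

/*
 * __elv_add_request()    -> blk_pm_add_request():     nr_pending++; if the
 *                           queue is RPM_SUSPENDED/RPM_SUSPENDING, kick
 *                           pm_request_resume() so the device wakes up.
 * blk_peek_request()     -> blk_pm_peek_request():    hold back normal
 *                           requests until the queue is RPM_ACTIVE again.
 * elv_requeue_request()  -> blk_pm_requeue_request(): nr_pending--; the
 *                           request is counted again when it is re-added.
 * __blk_put_request()    -> blk_pm_put_request():     if --nr_pending == 0,
 *                           pm_runtime_mark_last_busy() so the autosuspend
 *                           timer only fires once the queue is idle.
 */
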
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index ff5804e2f1d2..c85fc895ecdb 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -238,7 +238,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
                 le32_to_cpu(gpt->sizeof_partition_entry);
         if (!count)
                 return NULL;
-        pte = kzalloc(count, GFP_KERNEL);
+        pte = kmalloc(count, GFP_KERNEL);
         if (!pte)
                 return NULL;
 
@@ -267,7 +267,7 @@ static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
         gpt_header *gpt;
         unsigned ssz = bdev_logical_block_size(state->bdev);
 
-        gpt = kzalloc(ssz, GFP_KERNEL);
+        gpt = kmalloc(ssz, GFP_KERNEL);
         if (!gpt)
                 return NULL;
 