author		Lin Ming <ming.m.lin@intel.com>	2013-03-22 23:42:27 -0400
committer	Jens Axboe <axboe@kernel.dk>	2013-03-23 00:22:15 -0400
commit		c8158819d506a8aedeca53c52dfb709a0aabe011 (patch)
tree		acabca463a1898931cd325c415a51a819eabee00 /block
parent		6c9546675864f51506af69eca388e5d922942c56 (diff)
block: implement runtime pm strategy
When a request is added:
	If the device is suspended, or is suspending and the request is
	not a PM request, resume the device.

When the last request finishes:
	Call pm_runtime_mark_last_busy().

When picking a request:
	If the device is resuming/suspending, then only PM requests are
	allowed to go.

The idea and API were designed by Alan Stern and are described here:
http://marc.info/?l=linux-scsi&m=133727953625963&w=2

Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Acked-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
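For context, a low-level driver opts into this mechanism by registering its device with the request queue and enabling autosuspend. The sketch below is illustrative only: the my_* names are hypothetical, blk_pm_runtime_init() and the blk_{pre,post}_runtime_suspend() helpers come from the parent commit ("block: add runtime pm helpers"), and the rest is the standard runtime-PM API.

/* Hypothetical probe path: tie the queue's PM bookkeeping to the device. */
static void my_driver_init_rpm(struct request_queue *q, struct device *dev)
{
	blk_pm_runtime_init(q, dev);			/* sets q->dev and q->rpm_status */
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* suspend after 5s of idle */
}

/* Hypothetical runtime-suspend callback: the block layer refuses the
 * suspend while requests are still pending (q->nr_pending != 0). */
static int my_driver_runtime_suspend(struct device *dev)
{
	struct my_driver_data *data = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(data->queue);	/* -EBUSY if queue not idle */
	if (err)
		return err;
	err = my_hw_power_down(data);			/* hypothetical hardware op */
	blk_post_runtime_suspend(data->queue, err);	/* SUSPENDED on 0, ACTIVE on error */
	return err;
}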
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	39
-rw-r--r--	block/elevator.c	26
2 files changed, 65 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 123d240132bf..441f3488a766 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1264,6 +1264,16 @@ void part_round_stats(int cpu, struct hd_struct *part)
 }
 EXPORT_SYMBOL_GPL(part_round_stats);
 
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_put_request(struct request *rq)
+{
+	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+		pm_runtime_mark_last_busy(rq->q->dev);
+}
+#else
+static inline void blk_pm_put_request(struct request *rq) {}
+#endif
+
 /*
  * queue lock must be held
  */
@@ -1274,6 +1284,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	if (unlikely(--req->ref_count))
 		return;
 
+	blk_pm_put_request(req);
+
 	elv_completed_request(q, req);
 
 	/* this is a bio leak */
@@ -2053,6 +2065,28 @@ static void blk_account_io_done(struct request *req)
 	}
 }
 
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Don't process normal requests when queue is suspended
+ * or in the process of suspending/resuming
+ */
+static struct request *blk_pm_peek_request(struct request_queue *q,
+					   struct request *rq)
+{
+	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+		return NULL;
+	else
+		return rq;
+}
+#else
+static inline struct request *blk_pm_peek_request(struct request_queue *q,
+						  struct request *rq)
+{
+	return rq;
+}
+#endif
+
 /**
  * blk_peek_request - peek at the top of a request queue
  * @q: request queue to peek at
@@ -2075,6 +2109,11 @@ struct request *blk_peek_request(struct request_queue *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
+
+		rq = blk_pm_peek_request(q, rq);
+		if (!rq)
+			break;
+
 		if (!(rq->cmd_flags & REQ_STARTED)) {
 			/*
 			 * This is the first time the device driver
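Taken together, blk_pm_peek_request() implements a simple gate, consulted only when q->dev is set (i.e. the queue participates in runtime PM): everything dispatches while RPM_ACTIVE, only REQ_PM requests dispatch while suspending or resuming, and nothing dispatches while RPM_SUSPENDED (the resume kicked off by blk_pm_add_request() must complete first). A standalone userspace model of the predicate, for illustration only:

#include <stdio.h>
#include <stdbool.h>

enum rpm { ACTIVE, RESUMING, SUSPENDING, SUSPENDED };
static const char * const names[] = {
	"RPM_ACTIVE", "RPM_RESUMING", "RPM_SUSPENDING", "RPM_SUSPENDED"
};

/* Mirrors the kernel predicate: hold the request back when the device is
 * fully suspended, or when it is mid-transition and the request lacks REQ_PM. */
static bool dispatchable(enum rpm status, bool pm_request)
{
	return !(status == SUSPENDED ||
		 (status != ACTIVE && !pm_request));
}

int main(void)
{
	for (int s = ACTIVE; s <= SUSPENDED; s++)
		for (int pm = 0; pm <= 1; pm++)
			printf("%-14s %-6s request -> %s\n", names[s],
			       pm ? "PM" : "normal",
			       dispatchable(s, pm) ? "dispatch" : "hold");
	return 0;
}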
diff --git a/block/elevator.c b/block/elevator.c
index a0ffdd943c98..eba5b04c29b1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -34,6 +34,7 @@
 #include <linux/blktrace_api.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 
 #include <trace/events/block.h>
 
@@ -536,6 +537,27 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
 		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
 }
 
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_requeue_request(struct request *rq)
+{
+	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+		rq->q->nr_pending--;
+}
+
+static void blk_pm_add_request(struct request_queue *q, struct request *rq)
+{
+	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
+		pm_request_resume(q->dev);
+}
+#else
+static inline void blk_pm_requeue_request(struct request *rq) {}
+static inline void blk_pm_add_request(struct request_queue *q,
+				      struct request *rq)
+{
+}
+#endif
+
 void elv_requeue_request(struct request_queue *q, struct request *rq)
 {
 	/*
@@ -550,6 +572,8 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 
 	rq->cmd_flags &= ~REQ_STARTED;
 
+	blk_pm_requeue_request(rq);
+
 	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
@@ -572,6 +596,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	trace_block_rq_insert(q, rq);
 
+	blk_pm_add_request(q, rq);
+
 	rq->q = q;
 
 	if (rq->cmd_flags & REQ_SOFTBARRIER) {
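Finally, what makes something a "PM request" is simply REQ_PM in cmd_flags, typically set on commands a driver issues from its own runtime suspend/resume paths, while rpm_status is RPM_SUSPENDING or RPM_RESUMING, so they pass the gate above. A hedged sketch using the block API of this kernel's era; the function name and payload are hypothetical:

/* Issue a driver-private command that bypasses the runtime-PM gate. */
static int my_send_pm_command(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_NOIO);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_SPECIAL;	/* driver-private command */
	rq->cmd_flags |= REQ_PM;		/* pass the gate while suspending/resuming */

	/* ... fill in the driver-specific payload here ... */

	err = blk_execute_rq(q, disk, rq, 1);	/* insert at head and wait */
	blk_put_request(rq);
	return err;
}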