author     Lin Ming <ming.m.lin@intel.com>    2013-03-22 23:42:26 -0400
committer  Jens Axboe <axboe@kernel.dk>       2013-03-23 00:22:15 -0400
commit     6c9546675864f51506af69eca388e5d922942c56
tree       e689048677a1a066960ffd7ebd8330632476c045
parent     66311274691ec65972cad3626057fa8d00c146d8
block: add runtime pm helpers
Add runtime pm helper functions:

  void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
    - Initialization function for drivers to call.

  int blk_pre_runtime_suspend(struct request_queue *q)
    - If any requests are in the queue, mark last busy and return -EBUSY.
      Otherwise set q->rpm_status to RPM_SUSPENDING and return 0.

  void blk_post_runtime_suspend(struct request_queue *q, int err)
    - If the suspend succeeded then set q->rpm_status to RPM_SUSPENDED.
      Otherwise set it to RPM_ACTIVE and mark last busy.

  void blk_pre_runtime_resume(struct request_queue *q)
    - Set q->rpm_status to RPM_RESUMING.

  void blk_post_runtime_resume(struct request_queue *q, int err)
    - If the resume succeeded then set q->rpm_status to RPM_ACTIVE and call
      __blk_run_queue(), then mark last busy and autosuspend.
      Otherwise set q->rpm_status to RPM_SUSPENDED.

The idea and API were designed by Alan Stern and are described here:
http://marc.info/?l=linux-scsi&m=133727953625963&w=2

Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Acked-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
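For context, a minimal sketch of how a request-based driver might wire these helpers into its runtime PM callbacks, assuming the state transitions described above. This is illustrative only and not part of the patch; mydrv_*, my_device_suspend(), my_device_resume() and dev_to_my_queue() are hypothetical placeholders.

#include <linux/blkdev.h>
#include <linux/pm_runtime.h>

static int mydrv_runtime_suspend(struct device *dev)
{
	struct request_queue *q = dev_to_my_queue(dev);	/* hypothetical accessor */
	int err;

	/* Refuse to suspend while requests are still pending in the queue. */
	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;

	err = my_device_suspend(dev);		/* hypothetical hardware suspend */
	blk_post_runtime_suspend(q, err);	/* update q->rpm_status accordingly */
	return err;
}

static int mydrv_runtime_resume(struct device *dev)
{
	struct request_queue *q = dev_to_my_queue(dev);
	int err;

	blk_pre_runtime_resume(q);		/* mark the queue as RESUMING */
	err = my_device_resume(dev);		/* hypothetical hardware resume */
	blk_post_runtime_resume(q, err);	/* rerun the queue, arm autosuspend */
	return err;
}

/*
 * In probe, once the device and its request queue exist but before runtime
 * suspend can actually happen, register the queue for block layer runtime PM
 * and pick an autosuspend delay (blk_pm_runtime_init() leaves it at -1).
 */
static void mydrv_setup_rpm(struct request_queue *q, struct device *dev)
{
	blk_pm_runtime_init(q, dev);
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* e.g. 5 s idle timeout */
	pm_runtime_allow(dev);
}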
-rw-r--r--  block/blk-core.c          144
-rw-r--r--  include/linux/blkdev.h     27
2 files changed, 171 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 074b758efc42..123d240132bf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -30,6 +30,7 @@
 #include <linux/list_sort.h>
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -3045,6 +3046,149 @@ void blk_finish_plug(struct blk_plug *plug)
 }
 EXPORT_SYMBOL(blk_finish_plug);
 
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ *    Initialize runtime-PM-related fields for @q and start auto suspend for
+ *    @dev. Drivers that want to take advantage of request-based runtime PM
+ *    should call this function once @dev has been initialized and its request
+ *    queue @q has been allocated, but while runtime PM cannot yet happen (it
+ *    is disabled/forbidden, or its usage_count is > 0). In most cases, drivers
+ *    should call this function before any I/O has taken place.
+ *
+ *    This function takes care of setting up autosuspend for the device; the
+ *    autosuspend delay is set to -1 to make runtime suspend impossible until
+ *    an updated value is set, either by the user or by the driver. Drivers do
+ *    not need to touch other autosuspend settings.
+ *
+ *    The block layer runtime PM is request based, so it only works for drivers
+ *    that use requests as their IO unit instead of those that use bios directly.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+	q->dev = dev;
+	q->rpm_status = RPM_ACTIVE;
+	pm_runtime_set_autosuspend_delay(q->dev, -1);
+	pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
+
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ *    This function will check if runtime suspend is allowed for the device by
+ *    examining if there are any requests pending in the queue. If there are
+ *    requests pending, the device cannot be runtime suspended; otherwise, the
+ *    queue's status will be updated to SUSPENDING and the driver can proceed
+ *    to suspend the device.
+ *
+ *    If suspend is not allowed, we mark last busy for the device so that the
+ *    runtime PM core will try to autosuspend it some time later.
+ *
+ *    This function should be called near the start of the device's
+ *    runtime_suspend callback.
+ *
+ * Return:
+ *    0		- OK to runtime suspend the device
+ *    -EBUSY	- Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+	int ret = 0;
+
+	spin_lock_irq(q->queue_lock);
+	if (q->nr_pending) {
+		ret = -EBUSY;
+		pm_runtime_mark_last_busy(q->dev);
+	} else {
+		q->rpm_status = RPM_SUSPENDING;
+	}
+	spin_unlock_irq(q->queue_lock);
+	return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ *    Update the queue's runtime status according to the return value of the
+ *    device's runtime_suspend function, and mark last busy for the device so
+ *    that the PM core will try to autosuspend the device at a later time.
+ *
+ *    This function should be called near the end of the device's
+ *    runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!err) {
+		q->rpm_status = RPM_SUSPENDED;
+	} else {
+		q->rpm_status = RPM_ACTIVE;
+		pm_runtime_mark_last_busy(q->dev);
+	}
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
+
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ *    Update the queue's runtime status to RESUMING in preparation for the
+ *    runtime resume of the device.
+ *
+ *    This function should be called near the start of the device's
+ *    runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	q->rpm_status = RPM_RESUMING;
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ *    Update the queue's runtime status according to the return value of the
+ *    device's runtime_resume function. If it was successfully resumed, process
+ *    the requests that were queued while the device was suspended or resuming,
+ *    then mark last busy and initiate autosuspend for it.
+ *
+ *    This function should be called near the end of the device's
+ *    runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!err) {
+		q->rpm_status = RPM_ACTIVE;
+		__blk_run_queue(q);
+		pm_runtime_mark_last_busy(q->dev);
+		pm_runtime_autosuspend(q->dev);
+	} else {
+		q->rpm_status = RPM_SUSPENDED;
+	}
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+#endif
+
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 78feda9bbae2..89d89c7162aa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -361,6 +361,12 @@ struct request_queue {
 	 */
 	struct kobject kobj;
 
+#ifdef CONFIG_PM_RUNTIME
+	struct device		*dev;
+	int			rpm_status;
+	unsigned int		nr_pending;
+#endif
+
 	/*
 	 * queue settings
 	 */
@@ -961,6 +967,27 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
 /*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM_RUNTIME
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+	struct device *dev) {}
+static inline int blk_pre_runtime_suspend(struct request_queue *q)
+{
+	return -ENOSYS;
+}
+static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
+static inline void blk_pre_runtime_resume(struct request_queue *q) {}
+static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
+#endif
+
+/*
  * blk_plug permits building a queue of related requests by holding the I/O
  * fragments for a short period. This allows merging of sequential requests
  * into single larger request. As the requests are moved from a per-task list to