summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBart Van Assche <bvanassche@acm.org>2018-09-26 17:01:03 -0400
committerJens Axboe <axboe@kernel.dk>2018-09-26 17:11:28 -0400
commitbca6b067b0b269a7b8ba129e2a918309ca8b4a55 (patch)
tree8c41db994ddaf2023d881189cd947fce24f3b7d3
parent3cfa210bf3fe0803cca17f3775d8cf2360d5f443 (diff)
block: Move power management code into a new source file
Move the code for runtime power management from blk-core.c into the new source file blk-pm.c. Move the corresponding declarations from <linux/blkdev.h> into <linux/blk-pm.h>. For CONFIG_PM=n, leave out the declarations of the functions that are not used in that mode. This patch not only reduces the number of #ifdefs in the block layer core code but also reduces the size of the header file <linux/blkdev.h> and hence should help to reduce the build time of the Linux kernel if CONFIG_PM is not defined. Signed-off-by: Bart Van Assche <bvanassche@acm.org> Reviewed-by: Ming Lei <ming.lei@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Cc: Jianchao Wang <jianchao.w.wang@oracle.com> Cc: Hannes Reinecke <hare@suse.com> Cc: Johannes Thumshirn <jthumshirn@suse.de> Cc: Alan Stern <stern@rowland.harvard.edu> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/Kconfig3
-rw-r--r--block/Makefile1
-rw-r--r--block/blk-core.c196
-rw-r--r--block/blk-pm.c188
-rw-r--r--block/blk-pm.h43
-rw-r--r--block/elevator.c22
-rw-r--r--drivers/scsi/scsi_pm.c1
-rw-r--r--drivers/scsi/sd.c1
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--include/linux/blk-pm.h24
-rw-r--r--include/linux/blkdev.h23
11 files changed, 264 insertions, 239 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 1f2469a0123c..85263e7bded6 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -228,4 +228,7 @@ config BLK_MQ_RDMA
228 depends on BLOCK && INFINIBAND 228 depends on BLOCK && INFINIBAND
229 default y 229 default y
230 230
231config BLK_PM
232 def_bool BLOCK && PM
233
231source block/Kconfig.iosched 234source block/Kconfig.iosched
diff --git a/block/Makefile b/block/Makefile
index 572b33f32c07..27eac600474f 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_BLK_WBT) += blk-wbt.o
37obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o 37obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
38obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o 38obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
39obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o 39obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
40obj-$(CONFIG_BLK_PM) += blk-pm.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 4dbc93f43b38..6d4dd176bd9d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -42,6 +42,7 @@
42#include "blk.h" 42#include "blk.h"
43#include "blk-mq.h" 43#include "blk-mq.h"
44#include "blk-mq-sched.h" 44#include "blk-mq-sched.h"
45#include "blk-pm.h"
45#include "blk-rq-qos.h" 46#include "blk-rq-qos.h"
46 47
47#ifdef CONFIG_DEBUG_FS 48#ifdef CONFIG_DEBUG_FS
@@ -1726,16 +1727,6 @@ void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
1726} 1727}
1727EXPORT_SYMBOL_GPL(part_round_stats); 1728EXPORT_SYMBOL_GPL(part_round_stats);
1728 1729
1729#ifdef CONFIG_PM
1730static void blk_pm_put_request(struct request *rq)
1731{
1732 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
1733 pm_runtime_mark_last_busy(rq->q->dev);
1734}
1735#else
1736static inline void blk_pm_put_request(struct request *rq) {}
1737#endif
1738
1739void __blk_put_request(struct request_queue *q, struct request *req) 1730void __blk_put_request(struct request_queue *q, struct request *req)
1740{ 1731{
1741 req_flags_t rq_flags = req->rq_flags; 1732 req_flags_t rq_flags = req->rq_flags;
@@ -3757,191 +3748,6 @@ void blk_finish_plug(struct blk_plug *plug)
3757} 3748}
3758EXPORT_SYMBOL(blk_finish_plug); 3749EXPORT_SYMBOL(blk_finish_plug);
3759 3750
3760#ifdef CONFIG_PM
3761/**
3762 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3763 * @q: the queue of the device
3764 * @dev: the device the queue belongs to
3765 *
3766 * Description:
3767 * Initialize runtime-PM-related fields for @q and start auto suspend for
3768 * @dev. Drivers that want to take advantage of request-based runtime PM
3769 * should call this function after @dev has been initialized, and its
3770 * request queue @q has been allocated, and runtime PM for it can not happen
3771 * yet(either due to disabled/forbidden or its usage_count > 0). In most
3772 * cases, driver should call this function before any I/O has taken place.
3773 *
3774 * This function takes care of setting up using auto suspend for the device,
3775 * the autosuspend delay is set to -1 to make runtime suspend impossible
3776 * until an updated value is either set by user or by driver. Drivers do
3777 * not need to touch other autosuspend settings.
3778 *
3779 * The block layer runtime PM is request based, so only works for drivers
3780 * that use request as their IO unit instead of those directly use bio's.
3781 */
3782void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3783{
3784 /* Don't enable runtime PM for blk-mq until it is ready */
3785 if (q->mq_ops) {
3786 pm_runtime_disable(dev);
3787 return;
3788 }
3789
3790 q->dev = dev;
3791 q->rpm_status = RPM_ACTIVE;
3792 pm_runtime_set_autosuspend_delay(q->dev, -1);
3793 pm_runtime_use_autosuspend(q->dev);
3794}
3795EXPORT_SYMBOL(blk_pm_runtime_init);
3796
3797/**
3798 * blk_pre_runtime_suspend - Pre runtime suspend check
3799 * @q: the queue of the device
3800 *
3801 * Description:
3802 * This function will check if runtime suspend is allowed for the device
3803 * by examining if there are any requests pending in the queue. If there
3804 * are requests pending, the device can not be runtime suspended; otherwise,
3805 * the queue's status will be updated to SUSPENDING and the driver can
3806 * proceed to suspend the device.
3807 *
3808 * For the not allowed case, we mark last busy for the device so that
3809 * runtime PM core will try to autosuspend it some time later.
3810 *
3811 * This function should be called near the start of the device's
3812 * runtime_suspend callback.
3813 *
3814 * Return:
3815 * 0 - OK to runtime suspend the device
3816 * -EBUSY - Device should not be runtime suspended
3817 */
3818int blk_pre_runtime_suspend(struct request_queue *q)
3819{
3820 int ret = 0;
3821
3822 if (!q->dev)
3823 return ret;
3824
3825 spin_lock_irq(q->queue_lock);
3826 if (q->nr_pending) {
3827 ret = -EBUSY;
3828 pm_runtime_mark_last_busy(q->dev);
3829 } else {
3830 q->rpm_status = RPM_SUSPENDING;
3831 }
3832 spin_unlock_irq(q->queue_lock);
3833 return ret;
3834}
3835EXPORT_SYMBOL(blk_pre_runtime_suspend);
3836
3837/**
3838 * blk_post_runtime_suspend - Post runtime suspend processing
3839 * @q: the queue of the device
3840 * @err: return value of the device's runtime_suspend function
3841 *
3842 * Description:
3843 * Update the queue's runtime status according to the return value of the
3844 * device's runtime suspend function and mark last busy for the device so
3845 * that PM core will try to auto suspend the device at a later time.
3846 *
3847 * This function should be called near the end of the device's
3848 * runtime_suspend callback.
3849 */
3850void blk_post_runtime_suspend(struct request_queue *q, int err)
3851{
3852 if (!q->dev)
3853 return;
3854
3855 spin_lock_irq(q->queue_lock);
3856 if (!err) {
3857 q->rpm_status = RPM_SUSPENDED;
3858 } else {
3859 q->rpm_status = RPM_ACTIVE;
3860 pm_runtime_mark_last_busy(q->dev);
3861 }
3862 spin_unlock_irq(q->queue_lock);
3863}
3864EXPORT_SYMBOL(blk_post_runtime_suspend);
3865
3866/**
3867 * blk_pre_runtime_resume - Pre runtime resume processing
3868 * @q: the queue of the device
3869 *
3870 * Description:
3871 * Update the queue's runtime status to RESUMING in preparation for the
3872 * runtime resume of the device.
3873 *
3874 * This function should be called near the start of the device's
3875 * runtime_resume callback.
3876 */
3877void blk_pre_runtime_resume(struct request_queue *q)
3878{
3879 if (!q->dev)
3880 return;
3881
3882 spin_lock_irq(q->queue_lock);
3883 q->rpm_status = RPM_RESUMING;
3884 spin_unlock_irq(q->queue_lock);
3885}
3886EXPORT_SYMBOL(blk_pre_runtime_resume);
3887
3888/**
3889 * blk_post_runtime_resume - Post runtime resume processing
3890 * @q: the queue of the device
3891 * @err: return value of the device's runtime_resume function
3892 *
3893 * Description:
3894 * Update the queue's runtime status according to the return value of the
3895 * device's runtime_resume function. If it is successfully resumed, process
3896 * the requests that are queued into the device's queue when it is resuming
3897 * and then mark last busy and initiate autosuspend for it.
3898 *
3899 * This function should be called near the end of the device's
3900 * runtime_resume callback.
3901 */
3902void blk_post_runtime_resume(struct request_queue *q, int err)
3903{
3904 if (!q->dev)
3905 return;
3906
3907 spin_lock_irq(q->queue_lock);
3908 if (!err) {
3909 q->rpm_status = RPM_ACTIVE;
3910 __blk_run_queue(q);
3911 pm_runtime_mark_last_busy(q->dev);
3912 pm_request_autosuspend(q->dev);
3913 } else {
3914 q->rpm_status = RPM_SUSPENDED;
3915 }
3916 spin_unlock_irq(q->queue_lock);
3917}
3918EXPORT_SYMBOL(blk_post_runtime_resume);
3919
3920/**
3921 * blk_set_runtime_active - Force runtime status of the queue to be active
3922 * @q: the queue of the device
3923 *
3924 * If the device is left runtime suspended during system suspend the resume
3925 * hook typically resumes the device and corrects runtime status
3926 * accordingly. However, that does not affect the queue runtime PM status
3927 * which is still "suspended". This prevents processing requests from the
3928 * queue.
3929 *
3930 * This function can be used in driver's resume hook to correct queue
3931 * runtime PM status and re-enable peeking requests from the queue. It
3932 * should be called before first request is added to the queue.
3933 */
3934void blk_set_runtime_active(struct request_queue *q)
3935{
3936 spin_lock_irq(q->queue_lock);
3937 q->rpm_status = RPM_ACTIVE;
3938 pm_runtime_mark_last_busy(q->dev);
3939 pm_request_autosuspend(q->dev);
3940 spin_unlock_irq(q->queue_lock);
3941}
3942EXPORT_SYMBOL(blk_set_runtime_active);
3943#endif
3944
3945int __init blk_dev_init(void) 3751int __init blk_dev_init(void)
3946{ 3752{
3947 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS)); 3753 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
diff --git a/block/blk-pm.c b/block/blk-pm.c
new file mode 100644
index 000000000000..9b636960d285
--- /dev/null
+++ b/block/blk-pm.c
@@ -0,0 +1,188 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/blk-pm.h>
4#include <linux/blkdev.h>
5#include <linux/pm_runtime.h>
6
7/**
8 * blk_pm_runtime_init - Block layer runtime PM initialization routine
9 * @q: the queue of the device
10 * @dev: the device the queue belongs to
11 *
12 * Description:
13 * Initialize runtime-PM-related fields for @q and start auto suspend for
14 * @dev. Drivers that want to take advantage of request-based runtime PM
15 * should call this function after @dev has been initialized, and its
16 * request queue @q has been allocated, and runtime PM for it can not happen
17 * yet(either due to disabled/forbidden or its usage_count > 0). In most
18 * cases, driver should call this function before any I/O has taken place.
19 *
20 * This function takes care of setting up using auto suspend for the device,
21 * the autosuspend delay is set to -1 to make runtime suspend impossible
22 * until an updated value is either set by user or by driver. Drivers do
23 * not need to touch other autosuspend settings.
24 *
25 * The block layer runtime PM is request based, so only works for drivers
26 * that use request as their IO unit instead of those directly use bio's.
27 */
28void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
29{
30 /* Don't enable runtime PM for blk-mq until it is ready */
31 if (q->mq_ops) {
32 pm_runtime_disable(dev);
33 return;
34 }
35
36 q->dev = dev;
37 q->rpm_status = RPM_ACTIVE;
38 pm_runtime_set_autosuspend_delay(q->dev, -1);
39 pm_runtime_use_autosuspend(q->dev);
40}
41EXPORT_SYMBOL(blk_pm_runtime_init);
42
43/**
44 * blk_pre_runtime_suspend - Pre runtime suspend check
45 * @q: the queue of the device
46 *
47 * Description:
48 * This function will check if runtime suspend is allowed for the device
49 * by examining if there are any requests pending in the queue. If there
50 * are requests pending, the device can not be runtime suspended; otherwise,
51 * the queue's status will be updated to SUSPENDING and the driver can
52 * proceed to suspend the device.
53 *
54 * For the not allowed case, we mark last busy for the device so that
55 * runtime PM core will try to autosuspend it some time later.
56 *
57 * This function should be called near the start of the device's
58 * runtime_suspend callback.
59 *
60 * Return:
61 * 0 - OK to runtime suspend the device
62 * -EBUSY - Device should not be runtime suspended
63 */
64int blk_pre_runtime_suspend(struct request_queue *q)
65{
66 int ret = 0;
67
68 if (!q->dev)
69 return ret;
70
71 spin_lock_irq(q->queue_lock);
72 if (q->nr_pending) {
73 ret = -EBUSY;
74 pm_runtime_mark_last_busy(q->dev);
75 } else {
76 q->rpm_status = RPM_SUSPENDING;
77 }
78 spin_unlock_irq(q->queue_lock);
79 return ret;
80}
81EXPORT_SYMBOL(blk_pre_runtime_suspend);
82
83/**
84 * blk_post_runtime_suspend - Post runtime suspend processing
85 * @q: the queue of the device
86 * @err: return value of the device's runtime_suspend function
87 *
88 * Description:
89 * Update the queue's runtime status according to the return value of the
90 * device's runtime suspend function and mark last busy for the device so
91 * that PM core will try to auto suspend the device at a later time.
92 *
93 * This function should be called near the end of the device's
94 * runtime_suspend callback.
95 */
96void blk_post_runtime_suspend(struct request_queue *q, int err)
97{
98 if (!q->dev)
99 return;
100
101 spin_lock_irq(q->queue_lock);
102 if (!err) {
103 q->rpm_status = RPM_SUSPENDED;
104 } else {
105 q->rpm_status = RPM_ACTIVE;
106 pm_runtime_mark_last_busy(q->dev);
107 }
108 spin_unlock_irq(q->queue_lock);
109}
110EXPORT_SYMBOL(blk_post_runtime_suspend);
111
112/**
113 * blk_pre_runtime_resume - Pre runtime resume processing
114 * @q: the queue of the device
115 *
116 * Description:
117 * Update the queue's runtime status to RESUMING in preparation for the
118 * runtime resume of the device.
119 *
120 * This function should be called near the start of the device's
121 * runtime_resume callback.
122 */
123void blk_pre_runtime_resume(struct request_queue *q)
124{
125 if (!q->dev)
126 return;
127
128 spin_lock_irq(q->queue_lock);
129 q->rpm_status = RPM_RESUMING;
130 spin_unlock_irq(q->queue_lock);
131}
132EXPORT_SYMBOL(blk_pre_runtime_resume);
133
134/**
135 * blk_post_runtime_resume - Post runtime resume processing
136 * @q: the queue of the device
137 * @err: return value of the device's runtime_resume function
138 *
139 * Description:
140 * Update the queue's runtime status according to the return value of the
141 * device's runtime_resume function. If it is successfully resumed, process
142 * the requests that are queued into the device's queue when it is resuming
143 * and then mark last busy and initiate autosuspend for it.
144 *
145 * This function should be called near the end of the device's
146 * runtime_resume callback.
147 */
148void blk_post_runtime_resume(struct request_queue *q, int err)
149{
150 if (!q->dev)
151 return;
152
153 spin_lock_irq(q->queue_lock);
154 if (!err) {
155 q->rpm_status = RPM_ACTIVE;
156 __blk_run_queue(q);
157 pm_runtime_mark_last_busy(q->dev);
158 pm_request_autosuspend(q->dev);
159 } else {
160 q->rpm_status = RPM_SUSPENDED;
161 }
162 spin_unlock_irq(q->queue_lock);
163}
164EXPORT_SYMBOL(blk_post_runtime_resume);
165
166/**
167 * blk_set_runtime_active - Force runtime status of the queue to be active
168 * @q: the queue of the device
169 *
170 * If the device is left runtime suspended during system suspend the resume
171 * hook typically resumes the device and corrects runtime status
172 * accordingly. However, that does not affect the queue runtime PM status
173 * which is still "suspended". This prevents processing requests from the
174 * queue.
175 *
176 * This function can be used in driver's resume hook to correct queue
177 * runtime PM status and re-enable peeking requests from the queue. It
178 * should be called before first request is added to the queue.
179 */
180void blk_set_runtime_active(struct request_queue *q)
181{
182 spin_lock_irq(q->queue_lock);
183 q->rpm_status = RPM_ACTIVE;
184 pm_runtime_mark_last_busy(q->dev);
185 pm_request_autosuspend(q->dev);
186 spin_unlock_irq(q->queue_lock);
187}
188EXPORT_SYMBOL(blk_set_runtime_active);
diff --git a/block/blk-pm.h b/block/blk-pm.h
new file mode 100644
index 000000000000..1ffc8ef203ec
--- /dev/null
+++ b/block/blk-pm.h
@@ -0,0 +1,43 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _BLOCK_BLK_PM_H_
4#define _BLOCK_BLK_PM_H_
5
6#include <linux/pm_runtime.h>
7
8#ifdef CONFIG_PM
9static inline void blk_pm_requeue_request(struct request *rq)
10{
11 if (rq->q->dev && !(rq->rq_flags & RQF_PM))
12 rq->q->nr_pending--;
13}
14
15static inline void blk_pm_add_request(struct request_queue *q,
16 struct request *rq)
17{
18 if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
19 (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
20 pm_request_resume(q->dev);
21}
22
23static inline void blk_pm_put_request(struct request *rq)
24{
25 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
26 pm_runtime_mark_last_busy(rq->q->dev);
27}
28#else
29static inline void blk_pm_requeue_request(struct request *rq)
30{
31}
32
33static inline void blk_pm_add_request(struct request_queue *q,
34 struct request *rq)
35{
36}
37
38static inline void blk_pm_put_request(struct request *rq)
39{
40}
41#endif
42
43#endif /* _BLOCK_BLK_PM_H_ */
diff --git a/block/elevator.c b/block/elevator.c
index 6a06b5d040e5..e18ac68626e3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -41,6 +41,7 @@
41 41
42#include "blk.h" 42#include "blk.h"
43#include "blk-mq-sched.h" 43#include "blk-mq-sched.h"
44#include "blk-pm.h"
44#include "blk-wbt.h" 45#include "blk-wbt.h"
45 46
46static DEFINE_SPINLOCK(elv_list_lock); 47static DEFINE_SPINLOCK(elv_list_lock);
@@ -557,27 +558,6 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
557 e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio); 558 e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
558} 559}
559 560
560#ifdef CONFIG_PM
561static void blk_pm_requeue_request(struct request *rq)
562{
563 if (rq->q->dev && !(rq->rq_flags & RQF_PM))
564 rq->q->nr_pending--;
565}
566
567static void blk_pm_add_request(struct request_queue *q, struct request *rq)
568{
569 if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
570 (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
571 pm_request_resume(q->dev);
572}
573#else
574static inline void blk_pm_requeue_request(struct request *rq) {}
575static inline void blk_pm_add_request(struct request_queue *q,
576 struct request *rq)
577{
578}
579#endif
580
581void elv_requeue_request(struct request_queue *q, struct request *rq) 561void elv_requeue_request(struct request_queue *q, struct request *rq)
582{ 562{
583 /* 563 /*
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index b44c1bb687a2..a2b4179bfdf7 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -8,6 +8,7 @@
8#include <linux/pm_runtime.h> 8#include <linux/pm_runtime.h>
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/async.h> 10#include <linux/async.h>
11#include <linux/blk-pm.h>
11 12
12#include <scsi/scsi.h> 13#include <scsi/scsi.h>
13#include <scsi/scsi_device.h> 14#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b79b366a94f7..64514e8359e4 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -45,6 +45,7 @@
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/blkdev.h> 46#include <linux/blkdev.h>
47#include <linux/blkpg.h> 47#include <linux/blkpg.h>
48#include <linux/blk-pm.h>
48#include <linux/delay.h> 49#include <linux/delay.h>
49#include <linux/mutex.h> 50#include <linux/mutex.h>
50#include <linux/string_helpers.h> 51#include <linux/string_helpers.h>
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d0389b20574d..4f07b3410595 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -43,6 +43,7 @@
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <linux/init.h> 44#include <linux/init.h>
45#include <linux/blkdev.h> 45#include <linux/blkdev.h>
46#include <linux/blk-pm.h>
46#include <linux/mutex.h> 47#include <linux/mutex.h>
47#include <linux/slab.h> 48#include <linux/slab.h>
48#include <linux/pm_runtime.h> 49#include <linux/pm_runtime.h>
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
new file mode 100644
index 000000000000..b80c65aba249
--- /dev/null
+++ b/include/linux/blk-pm.h
@@ -0,0 +1,24 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _BLK_PM_H_
4#define _BLK_PM_H_
5
6struct device;
7struct request_queue;
8
9/*
10 * block layer runtime pm functions
11 */
12#ifdef CONFIG_PM
13extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
14extern int blk_pre_runtime_suspend(struct request_queue *q);
15extern void blk_post_runtime_suspend(struct request_queue *q, int err);
16extern void blk_pre_runtime_resume(struct request_queue *q);
17extern void blk_post_runtime_resume(struct request_queue *q, int err);
18extern void blk_set_runtime_active(struct request_queue *q);
19#else
20static inline void blk_pm_runtime_init(struct request_queue *q,
21 struct device *dev) {}
22#endif
23
24#endif /* _BLK_PM_H_ */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1d5e14139795..cd863511dedb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1281,29 +1281,6 @@ extern void blk_put_queue(struct request_queue *);
1281extern void blk_set_queue_dying(struct request_queue *); 1281extern void blk_set_queue_dying(struct request_queue *);
1282 1282
1283/* 1283/*
1284 * block layer runtime pm functions
1285 */
1286#ifdef CONFIG_PM
1287extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1288extern int blk_pre_runtime_suspend(struct request_queue *q);
1289extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1290extern void blk_pre_runtime_resume(struct request_queue *q);
1291extern void blk_post_runtime_resume(struct request_queue *q, int err);
1292extern void blk_set_runtime_active(struct request_queue *q);
1293#else
1294static inline void blk_pm_runtime_init(struct request_queue *q,
1295 struct device *dev) {}
1296static inline int blk_pre_runtime_suspend(struct request_queue *q)
1297{
1298 return -ENOSYS;
1299}
1300static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1301static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1302static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1303static inline void blk_set_runtime_active(struct request_queue *q) {}
1304#endif
1305
1306/*
1307 * blk_plug permits building a queue of related requests by holding the I/O 1284 * blk_plug permits building a queue of related requests by holding the I/O
1308 * fragments for a short period. This allows merging of sequential requests 1285 * fragments for a short period. This allows merging of sequential requests
1309 * into single larger request. As the requests are moved from a per-task list to 1286 * into single larger request. As the requests are moved from a per-task list to