Diffstat (limited to 'drivers/gpu/drm/i915/intel_runtime_pm.c')
 drivers/gpu/drm/i915/intel_runtime_pm.c | 285
 1 file changed, 257 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 9e9501f82f06..08f809371bbd 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -29,6 +29,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/vgaarb.h>
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "intel_drv.h"
 
@@ -49,6 +51,218 @@
  * present for a given platform.
  */
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+#include <linux/sort.h>
+
+#define STACKDEPTH 8
+
+static noinline depot_stack_handle_t __save_depot_stack(void)
+{
+	unsigned long entries[STACKDEPTH];
+	struct stack_trace trace = {
+		.entries = entries,
+		.max_entries = ARRAY_SIZE(entries),
+		.skip = 1,
+	};
+
+	save_stack_trace(&trace);
+	if (trace.nr_entries &&
+	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+static void __print_depot_stack(depot_stack_handle_t stack,
+				char *buf, int sz, int indent)
+{
+	unsigned long entries[STACKDEPTH];
+	struct stack_trace trace = {
+		.entries = entries,
+		.max_entries = ARRAY_SIZE(entries),
+	};
+
+	depot_fetch_stack(stack, &trace);
+	snprint_stack_trace(buf, sz, &trace, indent);
+}
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+
+	spin_lock_init(&rpm->debug.lock);
+}
+
+static noinline void
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+	depot_stack_handle_t stack, *stacks;
+	unsigned long flags;
+
+	atomic_inc(&rpm->wakeref_count);
+	assert_rpm_wakelock_held(i915);
+
+	if (!HAS_RUNTIME_PM(i915))
+		return;
+
+	stack = __save_depot_stack();
+	if (!stack)
+		return;
+
+	spin_lock_irqsave(&rpm->debug.lock, flags);
+
+	if (!rpm->debug.count)
+		rpm->debug.last_acquire = stack;
+
+	stacks = krealloc(rpm->debug.owners,
+			  (rpm->debug.count + 1) * sizeof(*stacks),
+			  GFP_NOWAIT | __GFP_NOWARN);
+	if (stacks) {
+		stacks[rpm->debug.count++] = stack;
+		rpm->debug.owners = stacks;
+	}
+
+	spin_unlock_irqrestore(&rpm->debug.lock, flags);
+}
+
+static int cmphandle(const void *_a, const void *_b)
+{
+	const depot_stack_handle_t * const a = _a, * const b = _b;
+
+	if (*a < *b)
+		return -1;
+	else if (*a > *b)
+		return 1;
+	else
+		return 0;
+}
+
+static void
+__print_intel_runtime_pm_wakeref(struct drm_printer *p,
+				 const struct intel_runtime_pm_debug *dbg)
+{
+	unsigned long i;
+	char *buf;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	if (dbg->last_acquire) {
+		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
+		drm_printf(p, "Wakeref last acquired:\n%s", buf);
+	}
+
+	if (dbg->last_release) {
+		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
+		drm_printf(p, "Wakeref last released:\n%s", buf);
+	}
+
+	drm_printf(p, "Wakeref count: %lu\n", dbg->count);
+
+	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
+
+	for (i = 0; i < dbg->count; i++) {
+		depot_stack_handle_t stack = dbg->owners[i];
+		unsigned long rep;
+
+		rep = 1;
+		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
+			rep++, i++;
+		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
+		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
+	}
+
+	kfree(buf);
+}
+
+static noinline void
+untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+	struct intel_runtime_pm_debug dbg = {};
+	struct drm_printer p;
+	unsigned long flags;
+
+	assert_rpm_wakelock_held(i915);
+	if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
+					&rpm->debug.lock,
+					flags)) {
+		dbg = rpm->debug;
+
+		rpm->debug.owners = NULL;
+		rpm->debug.count = 0;
+		rpm->debug.last_release = __save_depot_stack();
+
+		spin_unlock_irqrestore(&rpm->debug.lock, flags);
+	}
+	if (!dbg.count)
+		return;
+
+	p = drm_debug_printer("i915");
+	__print_intel_runtime_pm_wakeref(&p, &dbg);
+
+	kfree(dbg.owners);
+}
+
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+				    struct drm_printer *p)
+{
+	struct intel_runtime_pm_debug dbg = {};
+
+	do {
+		struct i915_runtime_pm *rpm = &i915->runtime_pm;
+		unsigned long alloc = dbg.count;
+		depot_stack_handle_t *s;
+
+		spin_lock_irq(&rpm->debug.lock);
+		dbg.count = rpm->debug.count;
+		if (dbg.count <= alloc) {
+			memcpy(dbg.owners,
+			       rpm->debug.owners,
+			       dbg.count * sizeof(*s));
+		}
+		dbg.last_acquire = rpm->debug.last_acquire;
+		dbg.last_release = rpm->debug.last_release;
+		spin_unlock_irq(&rpm->debug.lock);
+		if (dbg.count <= alloc)
+			break;
+
+		s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+		if (!s)
+			goto out;
+
+		dbg.owners = s;
+	} while (1);
+
+	__print_intel_runtime_pm_wakeref(p, &dbg);
+
+out:
+	kfree(dbg.owners);
+}
+
+#else
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+}
+
+static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	atomic_inc(&i915->runtime_pm.wakeref_count);
+	assert_rpm_wakelock_held(i915);
+}
+
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+	assert_rpm_wakelock_held(i915);
+	atomic_dec(&i915->runtime_pm.wakeref_count);
+}
+
+#endif
+
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 					 enum i915_power_well_id power_well_id);
 
@@ -3986,7 +4200,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
 
 /**
  * intel_runtime_pm_get - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference (mostly used for GEM
  * code to ensure the GTT or GT is on) and ensures that it is powered up.
@@ -3994,22 +4208,21 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
  */
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_get(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 	int ret;
 
 	ret = pm_runtime_get_sync(kdev);
 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
 
-	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
-	assert_rpm_wakelock_held(dev_priv);
+	track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference if the device is
  * already in use and ensures that it is powered up. It is illegal to try
@@ -4020,10 +4233,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  *
  * Returns: True if the wakeref was acquired, or False otherwise.
  */
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 {
 	if (IS_ENABLED(CONFIG_PM)) {
-		struct pci_dev *pdev = dev_priv->drm.pdev;
+		struct pci_dev *pdev = i915->drm.pdev;
 		struct device *kdev = &pdev->dev;
 
 		/*
@@ -4036,15 +4249,14 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 			return false;
 	}
 
-	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
-	assert_rpm_wakelock_held(dev_priv);
+	track_intel_runtime_pm_wakeref(i915);
 
 	return true;
 }
 
 /**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function grabs a device-level runtime pm reference (mostly used for GEM
  * code to ensure the GTT or GT is on).
@@ -4059,32 +4271,31 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
  */
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(i915);
 	pm_runtime_get_noresume(kdev);
 
-	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+	track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
  * intel_runtime_pm_put - release a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function drops the device-level runtime pm reference obtained by
  * intel_runtime_pm_get() and might power down the corresponding
  * hardware block right away if this is the last reference.
  */
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_put(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 
-	assert_rpm_wakelock_held(dev_priv);
-	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+	untrack_intel_runtime_pm_wakeref(i915);
 
 	pm_runtime_mark_last_busy(kdev);
 	pm_runtime_put_autosuspend(kdev);
@@ -4092,7 +4303,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 
 /**
  * intel_runtime_pm_enable - enable runtime pm
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
  *
  * This function enables runtime pm at the end of the driver load sequence.
  *
@@ -4100,9 +4311,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  * subordinate display power domains. That is done by
  * intel_power_domains_enable().
  */
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_enable(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 
 	/*
@@ -4124,7 +4335,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 	 * so the driver's own RPM reference tracking asserts also work on
 	 * platforms without RPM support.
 	 */
-	if (!HAS_RUNTIME_PM(dev_priv)) {
+	if (!HAS_RUNTIME_PM(i915)) {
 		int ret;
 
 		pm_runtime_dont_use_autosuspend(kdev);
@@ -4142,17 +4353,35 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 	pm_runtime_put_autosuspend(kdev);
 }
 
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_disable(struct drm_i915_private *i915)
 {
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = i915->drm.pdev;
 	struct device *kdev = &pdev->dev;
 
 	/* Transfer rpm ownership back to core */
-	WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
+	WARN(pm_runtime_get_sync(kdev) < 0,
 	     "Failed to pass rpm ownership back to core\n");
 
 	pm_runtime_dont_use_autosuspend(kdev);
 
-	if (!HAS_RUNTIME_PM(dev_priv))
+	if (!HAS_RUNTIME_PM(i915))
 		pm_runtime_put(kdev);
 }
+
+void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
+{
+	struct i915_runtime_pm *rpm = &i915->runtime_pm;
+	int count;
+
+	count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
+	WARN(count,
+	     "i915->runtime_pm.wakeref_count=%d on cleanup\n",
+	     count);
+
+	untrack_intel_runtime_pm_wakeref(i915);
+}
+
+void intel_runtime_pm_init_early(struct drm_i915_private *i915)
+{
+	init_intel_runtime_pm_wakeref(i915);
+}
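
The kerneldoc in this patch requires every wakeref obtained through intel_runtime_pm_get*() to be released by a matching intel_runtime_pm_put(); with CONFIG_DRM_I915_DEBUG_RUNTIME_PM enabled, track_intel_runtime_pm_wakeref() records a stackdepot trace per acquisition so leaked owners can be dumped. A minimal caller sketch of that pairing follows; the example_* helpers are hypothetical illustrations, not part of the patch.

/* Hypothetical callers showing the get/put pairing the tracking verifies. */
static void example_touch_hw(struct drm_i915_private *i915)
{
	intel_runtime_pm_get(i915);	/* wakes the device; wakeref is tracked */

	/* ... access registers or the GTT while the device is awake ... */

	intel_runtime_pm_put(i915);	/* untracks; device may autosuspend */
}

static void example_opportunistic(struct drm_i915_private *i915)
{
	/* Never wakes the device; only succeeds if it is already in use. */
	if (!intel_runtime_pm_get_if_in_use(i915))
		return;

	/* ... lightweight work, e.g. sampling a counter ... */

	intel_runtime_pm_put(i915);
}

An unbalanced get leaves i915->runtime_pm.wakeref_count non-zero, which intel_runtime_pm_cleanup() WARNs about at teardown, and print_intel_runtime_pm_wakeref() can dump the recorded acquisition stacks through a drm_printer.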