author    Chris Wilson <chris@chris-wilson.co.uk>    2019-01-14 09:21:10 -0500
committer Chris Wilson <chris@chris-wilson.co.uk>    2019-01-14 11:17:53 -0500
commit    16e4dd0342a804090fd0958bb271d3a6b57056ac (patch)
tree      6bb32435cc05f13a890e04f6cc6c057c1aa7b3d8 /drivers/gpu/drm/i915/intel_runtime_pm.c
parent    bd780f37a3617d3dda74b97013ae8aa9b07a1d91 (diff)
drm/i915: Markup paired operations on wakerefs
The majority of runtime-pm operations are bounded and scoped within a
function; for these it is easy to verify that the wakerefs are handled
correctly. We can employ the compiler to help us, and reduce the number of
wakerefs tracked when debugging, by passing the cookies provided by the
various rpm_get functions to their rpm_put counterparts. This makes the
pairing explicit, and given the required wakeref cookie the compiler can
verify that we pass an initialised value to the rpm_put (quite handy for
double-checking error paths). For regular builds, the compiler should be
able to eliminate the unused local variables and the program growth should
be minimal; fwiw, it came out as a net improvement, as gcc was able to
refactor rpm_get and rpm_get_if_in_use together.

v2: Just s/rpm_put/rpm_put_unchecked/ everywhere, leaving the manual
markup for smaller, more targeted patches.
v3: Mention the cookie in Returns.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-2-chris@chris-wilson.co.uk
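To illustrate the pairing this patch introduces, here is a minimal
caller-side sketch. The surrounding function and do_hardware_access() are
hypothetical; the intel_wakeref_t cookie and the get/put entry points are
those defined in the diff below.

/* Hypothetical caller: the cookie returned by the _get must be fed
 * back to the matching _put, so an uninitialised-variable warning
 * flags any path that releases a wakeref it never acquired. */
static int example_user(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;
        int err;

        wakeref = intel_runtime_pm_get(i915);
        err = do_hardware_access(i915); /* hypothetical work */
        intel_runtime_pm_put(i915, wakeref);

        return err;
}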
Diffstat (limited to 'drivers/gpu/drm/i915/intel_runtime_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c  |  97
1 file changed, 79 insertions, 18 deletions
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 08f809371bbd..c29577d7a35a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -94,7 +94,7 @@ static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
         spin_lock_init(&rpm->debug.lock);
 }
 
-static noinline void
+static noinline depot_stack_handle_t
 track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 {
         struct i915_runtime_pm *rpm = &i915->runtime_pm;
@@ -105,11 +105,11 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
         assert_rpm_wakelock_held(i915);
 
         if (!HAS_RUNTIME_PM(i915))
-                return;
+                return -1;
 
         stack = __save_depot_stack();
         if (!stack)
-                return;
+                return -1;
 
         spin_lock_irqsave(&rpm->debug.lock, flags);
 
@@ -122,9 +122,57 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
         if (stacks) {
                 stacks[rpm->debug.count++] = stack;
                 rpm->debug.owners = stacks;
+        } else {
+                stack = -1;
         }
 
         spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+        return stack;
+}
+
+static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+                                            depot_stack_handle_t stack)
+{
+        struct i915_runtime_pm *rpm = &i915->runtime_pm;
+        unsigned long flags, n;
+        bool found = false;
+
+        if (unlikely(stack == -1))
+                return;
+
+        spin_lock_irqsave(&rpm->debug.lock, flags);
+        for (n = rpm->debug.count; n--; ) {
+                if (rpm->debug.owners[n] == stack) {
+                        memmove(rpm->debug.owners + n,
+                                rpm->debug.owners + n + 1,
+                                (--rpm->debug.count - n) * sizeof(stack));
+                        found = true;
+                        break;
+                }
+        }
+        spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+        if (WARN(!found,
+                 "Unmatched wakeref (tracking %lu), count %u\n",
+                 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+                char *buf;
+
+                buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+                if (!buf)
+                        return;
+
+                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+                DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
+
+                stack = READ_ONCE(rpm->debug.last_release);
+                if (stack) {
+                        __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+                        DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
+                }
+
+                kfree(buf);
+        }
 }
 
 static int cmphandle(const void *_a, const void *_b)
@@ -249,10 +297,12 @@ static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 {
 }
 
-static void track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+static depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 {
         atomic_inc(&i915->runtime_pm.wakeref_count);
         assert_rpm_wakelock_held(i915);
+        return -1;
 }
 
 static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
@@ -1852,7 +1902,7 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
         mutex_unlock(&power_domains->lock);
 
         if (!is_enabled)
-                intel_runtime_pm_put(dev_priv);
+                intel_runtime_pm_put_unchecked(dev_priv);
 
         return is_enabled;
 }
@@ -1886,7 +1936,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 
         mutex_unlock(&power_domains->lock);
 
-        intel_runtime_pm_put(dev_priv);
+        intel_runtime_pm_put_unchecked(dev_priv);
 }
 
 #define I830_PIPES_POWER_DOMAINS ( \
@@ -3994,7 +4044,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
 {
         /* Keep the power well enabled, but cancel its rpm wakeref. */
-        intel_runtime_pm_put(dev_priv);
+        intel_runtime_pm_put_unchecked(dev_priv);
 
         /* Remove the refcount we took to keep power well support disabled. */
         if (!i915_modparams.disable_power_well)
@@ -4207,8 +4257,10 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-void intel_runtime_pm_get(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
 {
         struct pci_dev *pdev = i915->drm.pdev;
         struct device *kdev = &pdev->dev;
@@ -4217,7 +4269,7 @@ void intel_runtime_pm_get(struct drm_i915_private *i915)
         ret = pm_runtime_get_sync(kdev);
         WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
 
-        track_intel_runtime_pm_wakeref(i915);
+        return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
@@ -4231,9 +4283,10 @@ void intel_runtime_pm_get(struct drm_i915_private *i915)
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
  *
- * Returns: True if the wakeref was acquired, or False otherwise.
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
+ * as True if the wakeref was acquired, or False otherwise.
  */
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 {
         if (IS_ENABLED(CONFIG_PM)) {
                 struct pci_dev *pdev = i915->drm.pdev;
@@ -4246,12 +4299,10 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
                  * atm to the late/early system suspend/resume handlers.
                  */
                 if (pm_runtime_get_if_in_use(kdev) <= 0)
-                        return false;
+                        return 0;
         }
 
-        track_intel_runtime_pm_wakeref(i915);
-
-        return true;
+        return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
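With the conditional acquire, the boolean result is folded into the cookie
itself: 0 means the wakeref was not taken. A hypothetical caller would test
the cookie directly:

        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get_if_in_use(i915);
        if (!wakeref)
                return; /* device already suspended, nothing to do */

        /* ... touch hardware ... */

        intel_runtime_pm_put(i915, wakeref);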
@@ -4270,8 +4321,10 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
 {
         struct pci_dev *pdev = i915->drm.pdev;
         struct device *kdev = &pdev->dev;
@@ -4279,7 +4332,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
         assert_rpm_wakelock_held(i915);
         pm_runtime_get_noresume(kdev);
 
-        track_intel_runtime_pm_wakeref(i915);
+        return track_intel_runtime_pm_wakeref(i915);
 }
 
 /**
@@ -4290,7 +4343,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
  * intel_runtime_pm_get() and might power down the corresponding
  * hardware block right away if this is the last reference.
  */
-void intel_runtime_pm_put(struct drm_i915_private *i915)
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
 {
         struct pci_dev *pdev = i915->drm.pdev;
         struct device *kdev = &pdev->dev;
@@ -4301,6 +4354,14 @@ void intel_runtime_pm_put(struct drm_i915_private *i915)
         pm_runtime_put_autosuspend(kdev);
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+{
+        cancel_intel_runtime_pm_wakeref(i915, wref);
+        intel_runtime_pm_put_unchecked(i915);
+}
+#endif
+
 /**
  * intel_runtime_pm_enable - enable runtime pm
  * @i915: i915 device instance
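Note that the checked intel_runtime_pm_put() above is only built when
CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled. For other configurations the
header side (outside this file's diff) presumably discards the cookie and
falls through to the unchecked variant; a hypothetical sketch of that
fallback:

/* Hypothetical non-debug fallback; the real definition lives in a
 * header not shown in this diff. The cookie is ignored and the put
 * degenerates to the unchecked form. */
static inline void intel_runtime_pm_put(struct drm_i915_private *i915,
                                        intel_wakeref_t wref)
{
        intel_runtime_pm_put_unchecked(i915);
}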