author     Mike Galbraith <efault@gmx.de>    2010-07-07 04:29:01 -0400
committer  Ingo Molnar <mingo@elte.hu>       2011-09-13 05:12:12 -0400
commit     a1741e7fcbc19a67520115df480ab17012cc3d0b (patch)
tree       ff205a42ca1598a2038352eb2159115969b778d1 /drivers/dca
parent     bccc2f7b4c1a7fd3d51e34f9dc3397312afb030b (diff)
locking, drivers/dca: Annotate dca_lock as raw
The dca_lock can be taken in atomic context:
[ 25.607536] Call Trace:
[ 25.607557] [<ffffffff820078a1>] try_stack_unwind+0x151/0x1a0
[ 25.607566] [<ffffffff820062c2>] dump_trace+0x92/0x370
[ 25.607573] [<ffffffff8200731c>] show_trace_log_lvl+0x5c/0x80
[ 25.607578] [<ffffffff82007355>] show_trace+0x15/0x20
[ 25.607587] [<ffffffff823f4588>] dump_stack+0x77/0x8f
[ 25.607595] [<ffffffff82043f2a>] __might_sleep+0x11a/0x130
[ 25.607602] [<ffffffff823f7b93>] rt_spin_lock+0x83/0x90
[ 25.607611] [<ffffffffa0209138>] dca_common_get_tag+0x28/0x80 [dca]
[ 25.607622] [<ffffffffa02091c8>] dca3_get_tag+0x18/0x20 [dca]
[ 25.607634] [<ffffffffa0244e71>] igb_update_dca+0xb1/0x1d0 [igb]
[ 25.607649] [<ffffffffa0244ff5>] igb_setup_dca+0x65/0x80 [igb]
[ 25.607663] [<ffffffffa02535a6>] igb_probe+0x946/0xe4d [igb]
[ 25.607678] [<ffffffff82247517>] local_pci_probe+0x17/0x20
[ 25.607686] [<ffffffff82248661>] pci_device_probe+0x121/0x130
[ 25.607699] [<ffffffff822e4832>] driver_probe_device+0xd2/0x2e0
[ 25.607707] [<ffffffff822e4adb>] __driver_attach+0x9b/0xa0
[ 25.607714] [<ffffffff822e3d1b>] bus_for_each_dev+0x6b/0xa0
[ 25.607720] [<ffffffff822e4591>] driver_attach+0x21/0x30
[ 25.607727] [<ffffffff822e3425>] bus_add_driver+0x1e5/0x350
[ 25.607734] [<ffffffff822e4e41>] driver_register+0x81/0x160
[ 25.607742] [<ffffffff8224890f>] __pci_register_driver+0x6f/0xf0
[ 25.607752] [<ffffffffa011505b>] igb_init_module+0x5b/0x5d [igb]
[ 25.607769] [<ffffffff820001dd>] do_one_initcall+0x3d/0x1a0
[ 25.607778] [<ffffffff820961f6>] sys_init_module+0xe6/0x270
[ 25.607786] [<ffffffff82003232>] system_call_fastpath+0x16/0x1b
[ 25.607794] [<00007f84d6783f4a>] 0x7f84d6783f4a
and thus must not be preempted on -rt.
In mainline this change documents the low-level nature of
the lock; otherwise there is no functional difference. Lockdep
and Sparse checking will work as usual.
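Background for the annotation: on PREEMPT_RT a plain spinlock_t becomes a sleeping rt-mutex, which is why rt_spin_lock() trips __might_sleep() in the trace above when dca_common_get_tag() is reached from a non-sleepable context. A raw_spinlock_t keeps the classic spin-with-interrupts-disabled behaviour on both mainline and -rt. A minimal sketch of the pattern (hypothetical my_lock, not part of this patch):

#include <linux/spinlock.h>

/* Hypothetical example lock; dca_lock in the patch below follows the same pattern. */
static DEFINE_RAW_SPINLOCK(my_lock);

static void my_atomic_path(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&my_lock, flags);
        /*
         * The critical section must stay short and must not sleep:
         * no kzalloc(GFP_KERNEL), no sysfs operations, etc.
         */
        raw_spin_unlock_irqrestore(&my_lock, flags);
}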
Signed-off-by: Mike Galbraith <efault@gmx.de>
[ Fixed the domain allocation, which was calling kzalloc() from the IRQ-disabled section (see the sketch below) ]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
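The bracketed fix above refers to the register_dca_provider() hunk below: dca_allocate_domain() calls kzalloc(GFP_KERNEL), which may sleep, so it cannot run inside the raw, IRQ-disabled critical section. The patch therefore allocates with the lock dropped, retakes the lock, and rechecks whether another CPU already installed a domain in the meantime. A self-contained sketch of that pattern, with hypothetical obj_* names standing in for the dca_* helpers:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
        struct list_head node;
        int key;
};

static DEFINE_RAW_SPINLOCK(obj_lock);           /* stands in for dca_lock */
static LIST_HEAD(obj_list);                     /* stands in for dca_domains */

/* Caller must hold obj_lock. */
static struct obj *obj_find(int key)
{
        struct obj *o;

        list_for_each_entry(o, &obj_list, node)
                if (o->key == key)
                        return o;
        return NULL;
}

/* Look up the object for @key, allocating it outside the raw lock if missing. */
static struct obj *obj_get(int key)
{
        struct obj *o, *new;
        unsigned long flags;

        raw_spin_lock_irqsave(&obj_lock, flags);
        o = obj_find(key);
        raw_spin_unlock_irqrestore(&obj_lock, flags);
        if (o)
                return o;

        /* kzalloc(GFP_KERNEL) may sleep, so it runs with the lock dropped. */
        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;
        new->key = key;

        raw_spin_lock_irqsave(&obj_lock, flags);
        /* Recheck: another CPU may have added the object while we slept. */
        o = obj_find(key);
        if (!o) {
                o = new;
                new = NULL;                     /* ownership moved to the list */
                list_add(&o->node, &obj_list);
        }
        raw_spin_unlock_irqrestore(&obj_lock, flags);

        kfree(new);                             /* kfree(NULL) is a no-op */
        return o;
}

register_dca_provider() below does the same dance with dca_get_domain()/dca_allocate_domain() and frees the unused newdomain only after dropping the lock.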
Diffstat (limited to 'drivers/dca')
-rw-r--r--  drivers/dca/dca-core.c | 78
1 file changed, 44 insertions(+), 34 deletions(-)
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 4abd089a094f..25ec0bb05198 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Intel Corporation");
 
-static DEFINE_SPINLOCK(dca_lock);
+static DEFINE_RAW_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
 
@@ -101,10 +101,10 @@ static void unregister_dca_providers(void)
 
         INIT_LIST_HEAD(&unregistered_providers);
 
-        spin_lock_irqsave(&dca_lock, flags);
+        raw_spin_lock_irqsave(&dca_lock, flags);
 
         if (list_empty(&dca_domains)) {
-                spin_unlock_irqrestore(&dca_lock, flags);
+                raw_spin_unlock_irqrestore(&dca_lock, flags);
                 return;
         }
 
@@ -116,7 +116,7 @@ static void unregister_dca_providers(void)
 
         dca_free_domain(domain);
 
-        spin_unlock_irqrestore(&dca_lock, flags);
+        raw_spin_unlock_irqrestore(&dca_lock, flags);
 
         list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
                 dca_sysfs_remove_provider(dca);
@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain(struct device *dev)
         domain = dca_find_domain(rc);
 
         if (!domain) {
-                if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+                if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
                         dca_providers_blocked = 1;
-                } else {
-                        domain = dca_allocate_domain(rc);
-                        if (domain)
-                                list_add(&domain->node, &dca_domains);
-                }
         }
 
         return domain;
@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev)
         if (!dev)
                 return -EFAULT;
 
-        spin_lock_irqsave(&dca_lock, flags);
+        raw_spin_lock_irqsave(&dca_lock, flags);
 
         /* check if the requester has not been added already */
         dca = dca_find_provider_by_dev(dev);
         if (dca) {
-                spin_unlock_irqrestore(&dca_lock, flags);
+                raw_spin_unlock_irqrestore(&dca_lock, flags);
                 return -EEXIST;
         }
 
         pci_rc = dca_pci_rc_from_dev(dev);
         domain = dca_find_domain(pci_rc);
         if (!domain) {
-                spin_unlock_irqrestore(&dca_lock, flags);
+                raw_spin_unlock_irqrestore(&dca_lock, flags);
                 return -ENODEV;
         }
 
@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev)
                 break;
         }
 
-        spin_unlock_irqrestore(&dca_lock, flags);
+        raw_spin_unlock_irqrestore(&dca_lock, flags);
 
         if (slot < 0)
                 return slot;
 
         err = dca_sysfs_add_req(dca, dev, slot);
         if (err) {
-                spin_lock_irqsave(&dca_lock, flags);
+                raw_spin_lock_irqsave(&dca_lock, flags);
                 if (dca == dca_find_provider_by_dev(dev))
                         dca->ops->remove_requester(dca, dev);
-                spin_unlock_irqrestore(&dca_lock, flags);
+                raw_spin_unlock_irqrestore(&dca_lock, flags);
                 return err;
         }
 
@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *dev)
         if (!dev)
                 return -EFAULT;
 
-        spin_lock_irqsave(&dca_lock, flags);
+        raw_spin_lock_irqsave(&dca_lock, flags);
         dca = dca_find_provider_by_dev(dev);
         if (!dca) {
-                spin_unlock_irqrestore(&dca_lock, flags);
+                raw_spin_unlock_irqrestore(&dca_lock, flags);
                 return -ENODEV;
         }
         slot = dca->ops->remove_requester(dca, dev);
-        spin_unlock_irqrestore(&dca_lock, flags);
+        raw_spin_unlock_irqrestore(&dca_lock, flags);
 
         if (slot < 0)
                 return slot;
@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev, int cpu)
         u8 tag;
         unsigned long flags;
 
-        spin_lock_irqsave(&dca_lock, flags);
+        raw_spin_lock_irqsave(&dca_lock, flags);
 
         dca = dca_find_provider_by_dev(dev);
         if (!dca) {
-                spin_unlock_irqrestore(&dca_lock, flags);
+                raw_spin_unlock_irqrestore(&dca_lock, flags);
                 return -ENODEV;
         }
         tag = dca->ops->get_tag(dca, dev, cpu);
 
-        spin_unlock_irqrestore(&dca_lock, flags);
+        raw_spin_unlock_irqrestore(&dca_lock, flags);
         return tag;
 }
 
@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 {
         int err;
         unsigned long flags;
-        struct dca_domain *domain;
+        struct dca_domain *domain, *newdomain = NULL;
 
-        spin_lock_irqsave(&dca_lock, flags);
+        raw_spin_lock_irqsave(&dca_lock, flags);
         if (dca_providers_blocked) {
-                spin_unlock_irqrestore(&dca_lock, flags);
+                raw_spin_unlock_irqrestore(&dca_lock, flags);
                 return -ENODEV;
         }
-        spin_unlock_irqrestore(&dca_lock, flags);
+        raw_spin_unlock_irqrestore(&dca_lock, flags);
 
         err = dca_sysfs_add_provider(dca, dev);
         if (err)
                 return err;
 
-        spin_lock_irqsave(&dca_lock, flags);
+        raw_spin_lock_irqsave(&dca_lock, flags);
         domain = dca_get_domain(dev);
         if (!domain) {
+                struct pci_bus *rc;
+
                 if (dca_providers_blocked) {
-                        spin_unlock_irqrestore(&dca_lock, flags);
+                        raw_spin_unlock_irqrestore(&dca_lock, flags);
                         dca_sysfs_remove_provider(dca);
                         unregister_dca_providers();
-                } else {
-                        spin_unlock_irqrestore(&dca_lock, flags);
+                        return -ENODEV;
+                }
+
+                raw_spin_unlock_irqrestore(&dca_lock, flags);
+                rc = dca_pci_rc_from_dev(dev);
+                newdomain = dca_allocate_domain(rc);
+                if (!newdomain)
+                        return -ENODEV;
+                raw_spin_lock_irqsave(&dca_lock, flags);
+                /* Recheck, we might have raced after dropping the lock */
+                domain = dca_get_domain(dev);
+                if (!domain) {
+                        domain = newdomain;
+                        newdomain = NULL;
+                        list_add(&domain->node, &dca_domains);
                 }
-                return -ENODEV;
         }
         list_add(&dca->node, &domain->dca_providers);
-        spin_unlock_irqrestore(&dca_lock, flags);
+        raw_spin_unlock_irqrestore(&dca_lock, flags);
 
         blocking_notifier_call_chain(&dca_provider_chain,
                                      DCA_PROVIDER_ADD, NULL);
+        kfree(newdomain);
         return 0;
 }
 EXPORT_SYMBOL_GPL(register_dca_provider);
@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
         blocking_notifier_call_chain(&dca_provider_chain,
                                      DCA_PROVIDER_REMOVE, NULL);
 
-        spin_lock_irqsave(&dca_lock, flags);
+        raw_spin_lock_irqsave(&dca_lock, flags);
 
         list_del(&dca->node);
 
@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
         if (list_empty(&domain->dca_providers))
                 dca_free_domain(domain);
 
-        spin_unlock_irqrestore(&dca_lock, flags);
+        raw_spin_unlock_irqrestore(&dca_lock, flags);
 
         dca_sysfs_remove_provider(dca);
 }
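For context on the atomic caller visible in the trace: network drivers such as igb fetch a DCA tag for the current CPU from non-sleepable paths (the trace above shows igb_update_dca() reaching dca3_get_tag() and tripping __might_sleep() on -rt), which is why dca_common_get_tag() cannot take a sleeping lock. A hedged caller-side sketch; only dca3_get_tag() is the real DCA API, the device register and the helper itself are made up:

#include <linux/dca.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/smp.h>

/* Hypothetical consumer: program the current CPU's DCA tag into a device register. */
static void example_update_dca(struct pci_dev *pdev, void __iomem *tag_reg)
{
        int cpu = get_cpu();                    /* disables preemption -> atomic context */
        u8 tag = dca3_get_tag(&pdev->dev, cpu); /* takes dca_lock internally */

        writel(tag, tag_reg);                   /* made-up register write */
        put_cpu();
}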