Diffstat (limited to 'drivers/dca')
-rw-r--r--	drivers/dca/dca-core.c	78
1 file changed, 44 insertions(+), 34 deletions(-)
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 4abd089a094f..25ec0bb05198 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Intel Corporation");
 
-static DEFINE_SPINLOCK(dca_lock);
+static DEFINE_RAW_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
 
@@ -101,10 +101,10 @@ static void unregister_dca_providers(void)
 
 	INIT_LIST_HEAD(&unregistered_providers);
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	if (list_empty(&dca_domains)) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return;
 	}
 
@@ -116,7 +116,7 @@ static void unregister_dca_providers(void)
 
 	dca_free_domain(domain);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
 		dca_sysfs_remove_provider(dca);
@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain(struct device *dev)
 	domain = dca_find_domain(rc);
 
 	if (!domain) {
-		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
 			dca_providers_blocked = 1;
-		} else {
-			domain = dca_allocate_domain(rc);
-			if (domain)
-				list_add(&domain->node, &dca_domains);
-		}
 	}
 
 	return domain;
@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev)
 	if (!dev)
 		return -EFAULT;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	/* check if the requester has not been added already */
 	dca = dca_find_provider_by_dev(dev);
 	if (dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -EEXIST;
 	}
 
 	pci_rc = dca_pci_rc_from_dev(dev);
 	domain = dca_find_domain(pci_rc);
 	if (!domain) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 
@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev)
 		break;
 	}
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	if (slot < 0)
 		return slot;
 
 	err = dca_sysfs_add_req(dca, dev, slot);
 	if (err) {
-		spin_lock_irqsave(&dca_lock, flags);
+		raw_spin_lock_irqsave(&dca_lock, flags);
 		if (dca == dca_find_provider_by_dev(dev))
 			dca->ops->remove_requester(dca, dev);
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return err;
 	}
 
@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *dev)
 	if (!dev)
 		return -EFAULT;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	dca = dca_find_provider_by_dev(dev);
 	if (!dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 	slot = dca->ops->remove_requester(dca, dev);
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	if (slot < 0)
 		return slot;
@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev, int cpu)
 	u8 tag;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	dca = dca_find_provider_by_dev(dev);
 	if (!dca) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
 	tag = dca->ops->get_tag(dca, dev, cpu);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 	return tag;
 }
 
@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 {
 	int err;
 	unsigned long flags;
-	struct dca_domain *domain;
+	struct dca_domain *domain, *newdomain = NULL;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	if (dca_providers_blocked) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
 		return -ENODEV;
 	}
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	err = dca_sysfs_add_provider(dca, dev);
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 	domain = dca_get_domain(dev);
 	if (!domain) {
+		struct pci_bus *rc;
+
 		if (dca_providers_blocked) {
-			spin_unlock_irqrestore(&dca_lock, flags);
+			raw_spin_unlock_irqrestore(&dca_lock, flags);
 			dca_sysfs_remove_provider(dca);
 			unregister_dca_providers();
-		} else {
-			spin_unlock_irqrestore(&dca_lock, flags);
+			return -ENODEV;
+		}
+
+		raw_spin_unlock_irqrestore(&dca_lock, flags);
+		rc = dca_pci_rc_from_dev(dev);
+		newdomain = dca_allocate_domain(rc);
+		if (!newdomain)
+			return -ENODEV;
+		raw_spin_lock_irqsave(&dca_lock, flags);
+		/* Recheck, we might have raced after dropping the lock */
+		domain = dca_get_domain(dev);
+		if (!domain) {
+			domain = newdomain;
+			newdomain = NULL;
+			list_add(&domain->node, &dca_domains);
 		}
-		return -ENODEV;
 	}
 	list_add(&dca->node, &domain->dca_providers);
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	blocking_notifier_call_chain(&dca_provider_chain,
 				     DCA_PROVIDER_ADD, NULL);
+	kfree(newdomain);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(register_dca_provider);
@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	blocking_notifier_call_chain(&dca_provider_chain,
 				     DCA_PROVIDER_REMOVE, NULL);
 
-	spin_lock_irqsave(&dca_lock, flags);
+	raw_spin_lock_irqsave(&dca_lock, flags);
 
 	list_del(&dca->node);
 
@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
 	if (list_empty(&domain->dca_providers))
 		dca_free_domain(domain);
 
-	spin_unlock_irqrestore(&dca_lock, flags);
+	raw_spin_unlock_irqrestore(&dca_lock, flags);
 
 	dca_sysfs_remove_provider(dca);
 }