author      Christoph Hellwig <hch@lst.de>          2007-07-20 15:39:51 -0400
committer   Arnd Bergmann <arnd@klappe.arndb.de>    2007-07-20 15:42:19 -0400
commit      2414059420311e5384de646eebfd529c184afd3c (patch)
tree        fbe0aa9560b9e188cb2d39d9166bbc84b4d6f8ee /arch/powerpc
parent      9e7cbcbb6ede4299d52c839e352aae527c06124a (diff)
[CELL] spu_base: locking cleanup
Sort out the locking mess in spu_base and document the current rules.
As an added benefit, spu_alloc* and spu_free don't block anymore.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
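
In outline, the patch splits the old global spu_mutex into locks that match how each structure is used. The non-blocking claim in the changelog follows from one half of that split: cbe_spu_info (the per-node free lists) and spu->number are now covered by a plain spinlock, spu_lock, so the allocator paths never sleep. A minimal sketch of that part of the scheme, using the names the patch introduces below; the helper function itself is illustrative and not part of the patch:

/* Protects cbe_spu_info and spu->number (introduced in the first hunk). */
static DEFINE_SPINLOCK(spu_lock);

/*
 * Illustrative helper, not from the patch: with a spinlock instead of the
 * old spu_mutex, taking an SPU off a node's free list can never block.
 * This mirrors what spu_alloc_node() does after the conversion.
 */
static struct spu *example_grab_free_spu(int node)
{
        struct spu *spu = NULL;

        spin_lock(&spu_lock);
        if (!list_empty(&cbe_spu_info[node].free_spus)) {
                spu = list_entry(cbe_spu_info[node].free_spus.next,
                                 struct spu, list);
                list_del_init(&spu->list);
        }
        spin_unlock(&spu_lock);

        return spu;
}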
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c  84
1 file changed, 51 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 03b4a8eb9044..8617b507af49 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -42,12 +42,30 @@ const struct spu_management_ops *spu_management_ops;
 EXPORT_SYMBOL_GPL(spu_management_ops);
 
 const struct spu_priv1_ops *spu_priv1_ops;
+EXPORT_SYMBOL_GPL(spu_priv1_ops);
 
-static LIST_HEAD(spu_full_list);
-static DEFINE_MUTEX(spu_mutex);
-static DEFINE_SPINLOCK(spu_list_lock);
+struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
+EXPORT_SYMBOL_GPL(cbe_spu_info);
 
-EXPORT_SYMBOL_GPL(spu_priv1_ops);
+/*
+ * Protects cbe_spu_info and spu->number.
+ */
+static DEFINE_SPINLOCK(spu_lock);
+
+/*
+ * List of all spus in the system.
+ *
+ * This list is iterated by callers from irq context and callers that
+ * want to sleep. Thus modifications need to be done with both
+ * spu_full_list_lock and spu_full_list_mutex held, while iterating
+ * through it requires either of these locks.
+ *
+ * In addition spu_full_list_lock protects all assignmens to
+ * spu->mm.
+ */
+static LIST_HEAD(spu_full_list);
+static DEFINE_SPINLOCK(spu_full_list_lock);
+static DEFINE_MUTEX(spu_full_list_mutex);
 
 void spu_invalidate_slbs(struct spu *spu)
 {
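
The comment added in this hunk is the core rule of the patch: spu_full_list is walked both from interrupt context and from code that may sleep, so a writer must hold spu_full_list_mutex and spu_full_list_lock together, while a reader takes whichever of the two suits its context (the spinlock with interrupts disabled from atomic paths such as spu_flush_all_slbs(), the mutex from sleeping paths such as the sysdev attribute helpers). A short sketch of the writer side, mirroring the create_spu() hunk further down; the helper name is illustrative only:

/*
 * Illustrative writer, not from the patch: publishing a new SPU on
 * spu_full_list takes both locks, so neither class of reader can see a
 * half-finished list update.
 */
static void example_publish_spu(struct spu *spu)
{
        unsigned long flags;

        mutex_lock(&spu_full_list_mutex);               /* excludes sleeping readers */
        spin_lock_irqsave(&spu_full_list_lock, flags);  /* excludes atomic readers   */
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        mutex_unlock(&spu_full_list_mutex);
}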
@@ -66,12 +84,12 @@ void spu_flush_all_slbs(struct mm_struct *mm)
         struct spu *spu;
         unsigned long flags;
 
-        spin_lock_irqsave(&spu_list_lock, flags);
+        spin_lock_irqsave(&spu_full_list_lock, flags);
         list_for_each_entry(spu, &spu_full_list, full_list) {
                 if (spu->mm == mm)
                         spu_invalidate_slbs(spu);
         }
-        spin_unlock_irqrestore(&spu_list_lock, flags);
+        spin_unlock_irqrestore(&spu_full_list_lock, flags);
 }
 
 /* The hack below stinks... try to do something better one of
@@ -89,9 +107,9 @@ void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&spu_list_lock, flags);
+        spin_lock_irqsave(&spu_full_list_lock, flags);
         spu->mm = mm;
-        spin_unlock_irqrestore(&spu_list_lock, flags);
+        spin_unlock_irqrestore(&spu_full_list_lock, flags);
         if (mm)
                 mm_needs_global_tlbie(mm);
 }
@@ -429,7 +447,7 @@ struct spu *spu_alloc_spu(struct spu *req_spu)
 {
         struct spu *spu, *ret = NULL;
 
-        mutex_lock(&spu_mutex);
+        spin_lock(&spu_lock);
         list_for_each_entry(spu, &cbe_spu_info[req_spu->node].free_spus, list) {
                 if (spu == req_spu) {
                         list_del_init(&spu->list);
@@ -439,7 +457,7 @@ struct spu *spu_alloc_spu(struct spu *req_spu)
                         break;
                 }
         }
-        mutex_unlock(&spu_mutex);
+        spin_unlock(&spu_lock);
         return ret;
 }
 EXPORT_SYMBOL_GPL(spu_alloc_spu);
@@ -448,14 +466,14 @@ struct spu *spu_alloc_node(int node)
 {
         struct spu *spu = NULL;
 
-        mutex_lock(&spu_mutex);
+        spin_lock(&spu_lock);
         if (!list_empty(&cbe_spu_info[node].free_spus)) {
                 spu = list_entry(cbe_spu_info[node].free_spus.next, struct spu,
                                  list);
                 list_del_init(&spu->list);
                 pr_debug("Got SPU %d %d\n", spu->number, spu->node);
         }
-        mutex_unlock(&spu_mutex);
+        spin_unlock(&spu_lock);
 
         if (spu)
                 spu_init_channels(spu);
@@ -479,9 +497,9 @@ struct spu *spu_alloc(void)
 
 void spu_free(struct spu *spu)
 {
-        mutex_lock(&spu_mutex);
+        spin_lock(&spu_lock);
         list_add_tail(&spu->list, &cbe_spu_info[spu->node].free_spus);
-        mutex_unlock(&spu_mutex);
+        spin_unlock(&spu_lock);
 }
 EXPORT_SYMBOL_GPL(spu_free);
 
@@ -502,12 +520,12 @@ struct sysdev_class spu_sysdev_class = {
 int spu_add_sysdev_attr(struct sysdev_attribute *attr)
 {
         struct spu *spu;
-        mutex_lock(&spu_mutex);
 
+        mutex_lock(&spu_full_list_mutex);
         list_for_each_entry(spu, &spu_full_list, full_list)
                 sysdev_create_file(&spu->sysdev, attr);
+        mutex_unlock(&spu_full_list_mutex);
 
-        mutex_unlock(&spu_mutex);
         return 0;
 }
 EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
@@ -515,12 +533,12 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
 int spu_add_sysdev_attr_group(struct attribute_group *attrs)
 {
         struct spu *spu;
-        mutex_lock(&spu_mutex);
 
+        mutex_lock(&spu_full_list_mutex);
         list_for_each_entry(spu, &spu_full_list, full_list)
                 sysfs_create_group(&spu->sysdev.kobj, attrs);
+        mutex_unlock(&spu_full_list_mutex);
 
-        mutex_unlock(&spu_mutex);
         return 0;
 }
 EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
@@ -529,24 +547,22 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
 void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
 {
         struct spu *spu;
-        mutex_lock(&spu_mutex);
 
+        mutex_lock(&spu_full_list_mutex);
         list_for_each_entry(spu, &spu_full_list, full_list)
                 sysdev_remove_file(&spu->sysdev, attr);
-
-        mutex_unlock(&spu_mutex);
+        mutex_unlock(&spu_full_list_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
 
 void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
 {
         struct spu *spu;
-        mutex_lock(&spu_mutex);
 
+        mutex_lock(&spu_full_list_mutex);
         list_for_each_entry(spu, &spu_full_list, full_list)
                 sysfs_remove_group(&spu->sysdev.kobj, attrs);
-
-        mutex_unlock(&spu_mutex);
+        mutex_unlock(&spu_full_list_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
 
@@ -582,9 +598,9 @@ static int __init create_spu(void *data)
                 goto out;
 
         spin_lock_init(&spu->register_lock);
-        mutex_lock(&spu_mutex);
+        spin_lock(&spu_lock);
         spu->number = number++;
-        mutex_unlock(&spu_mutex);
+        spin_unlock(&spu_lock);
 
         ret = spu_create_spu(spu, data);
 
@@ -601,14 +617,17 @@ static int __init create_spu(void *data)
         if (ret)
                 goto out_free_irqs;
 
-        mutex_lock(&spu_mutex);
-        spin_lock_irqsave(&spu_list_lock, flags);
+        spin_lock(&spu_lock);
         list_add(&spu->list, &cbe_spu_info[spu->node].free_spus);
         list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
         cbe_spu_info[spu->node].n_spus++;
+        spin_unlock(&spu_lock);
+
+        mutex_lock(&spu_full_list_mutex);
+        spin_lock_irqsave(&spu_full_list_lock, flags);
         list_add(&spu->full_list, &spu_full_list);
-        spin_unlock_irqrestore(&spu_list_lock, flags);
-        mutex_unlock(&spu_mutex);
+        spin_unlock_irqrestore(&spu_full_list_lock, flags);
+        mutex_unlock(&spu_full_list_mutex);
 
         spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
         ktime_get_ts(&ts);
@@ -675,9 +694,6 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
 
 static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
 
-struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
-EXPORT_SYMBOL_GPL(cbe_spu_info);
-
 /* Hardcoded affinity idxs for QS20 */
 #define SPES_PER_BE 8
 static int QS20_reg_idxs[SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
@@ -847,8 +863,10 @@ static int __init init_spu_base(void)
                 fb_append_extra_logo(&logo_spe_clut224, ret);
         }
 
+        mutex_lock(&spu_full_list_mutex);
         xmon_register_spus(&spu_full_list);
         crash_register_spus(&spu_full_list);
+        mutex_unlock(&spu_full_list_mutex);
         spu_add_sysdev_attr(&attr_stat);
 
         if (of_has_vicinity()) {