Diffstat (limited to 'arch/powerpc/platforms/cell/spu_base.c')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 81
1 file changed, 69 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index c43999a10deb..eba7a2641dce 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -38,8 +38,61 @@
 const struct spu_management_ops *spu_management_ops;
 const struct spu_priv1_ops *spu_priv1_ops;
 
+static struct list_head spu_list[MAX_NUMNODES];
+static LIST_HEAD(spu_full_list);
+static DEFINE_MUTEX(spu_mutex);
+static spinlock_t spu_list_lock = SPIN_LOCK_UNLOCKED;
+
 EXPORT_SYMBOL_GPL(spu_priv1_ops);
 
+void spu_invalidate_slbs(struct spu *spu)
+{
+	struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
+		out_be64(&priv2->slb_invalidate_all_W, 0UL);
+}
+EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
+
+/* This is called by the MM core when a segment size is changed, to
+ * request a flush of all the SPEs using a given mm
+ */
+void spu_flush_all_slbs(struct mm_struct *mm)
+{
+	struct spu *spu;
+	unsigned long flags;
+
+	spin_lock_irqsave(&spu_list_lock, flags);
+	list_for_each_entry(spu, &spu_full_list, full_list) {
+		if (spu->mm == mm)
+			spu_invalidate_slbs(spu);
+	}
+	spin_unlock_irqrestore(&spu_list_lock, flags);
+}
+
+/* The hack below stinks... try to do something better one of
+ * these days... Does it even work properly with NR_CPUS == 1 ?
+ */
+static inline void mm_needs_global_tlbie(struct mm_struct *mm)
+{
+	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
+
+	/* Global TLBIE broadcast required with SPEs. */
+	__cpus_setall(&mm->cpu_vm_mask, nr);
+}
+
+void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&spu_list_lock, flags);
+	spu->mm = mm;
+	spin_unlock_irqrestore(&spu_list_lock, flags);
+	if (mm)
+		mm_needs_global_tlbie(mm);
+}
+EXPORT_SYMBOL_GPL(spu_associate_mm);
+
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
 	pr_debug("%s\n", __FUNCTION__);
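The hunk above introduces a second lock: spu_list_lock protects both the spu_full_list walk in spu_flush_all_slbs() and the spu->mm assignment in spu_associate_mm(), so a flush requested by the MM core can never observe a half-published mm pointer. Below is a minimal userspace C model of that pairing; a pthread mutex stands in for the irqsave spinlock, and the struct and variable names only mirror the patch, so the code is purely illustrative.

#include <pthread.h>
#include <stddef.h>

struct mm;                              /* stand-in for struct mm_struct */

struct spu {
	struct mm *mm;
	struct spu *next;               /* stand-in for the full_list link */
};

static struct spu *spu_full_list;       /* every SPE, across all nodes */
static pthread_mutex_t spu_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models spu_associate_mm(): publish the new mm under the list lock. */
static void associate_mm(struct spu *spu, struct mm *mm)
{
	pthread_mutex_lock(&spu_list_lock);
	spu->mm = mm;
	pthread_mutex_unlock(&spu_list_lock);
}

/* Models spu_flush_all_slbs(): the walk and the mm comparison happen
 * under the same lock, so they cannot race with associate_mm(). */
static void flush_all(struct mm *mm)
{
	struct spu *spu;

	pthread_mutex_lock(&spu_list_lock);
	for (spu = spu_full_list; spu != NULL; spu = spu->next)
		if (spu->mm == mm)
			; /* spu_invalidate_slbs(spu) would go here */
	pthread_mutex_unlock(&spu_list_lock);
}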
@@ -74,6 +127,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 	struct mm_struct *mm = spu->mm;
 	u64 esid, vsid, llp;
+	int psize;
 
 	pr_debug("%s\n", __FUNCTION__);
 
@@ -90,22 +144,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 	case USER_REGION_ID:
 #ifdef CONFIG_HUGETLB_PAGE
 		if (in_hugepage_area(mm->context, ea))
-			llp = mmu_psize_defs[mmu_huge_psize].sllp;
+			psize = mmu_huge_psize;
 		else
 #endif
-			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+			psize = mm->context.user_psize;
 		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
-			SLB_VSID_USER | llp;
+			SLB_VSID_USER;
 		break;
 	case VMALLOC_REGION_ID:
-		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+		if (ea < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
 		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
-			SLB_VSID_KERNEL | llp;
+			SLB_VSID_KERNEL;
 		break;
 	case KERNEL_REGION_ID:
-		llp = mmu_psize_defs[mmu_linear_psize].sllp;
+		psize = mmu_linear_psize;
 		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
-			SLB_VSID_KERNEL | llp;
+			SLB_VSID_KERNEL;
 		break;
 	default:
 		/* Future: support kernel segments so that drivers
@@ -114,9 +171,10 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 		pr_debug("invalid region access at %016lx\n", ea);
 		return 1;
 	}
+	llp = mmu_psize_defs[psize].sllp;
 
 	out_be64(&priv2->slb_index_W, spu->slb_replace);
-	out_be64(&priv2->slb_vsid_RW, vsid);
+	out_be64(&priv2->slb_vsid_RW, vsid | llp);
 	out_be64(&priv2->slb_esid_RW, esid);
 
 	spu->slb_replace++;
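Taken together, the two hunks above replace per-case llp computation with a per-case psize selection, translated to SLB bits exactly once via llp = mmu_psize_defs[psize].sllp and OR'd into the VSID at the single write site. One motivation visible in the diff is that the vmalloc region can now carry two page sizes: mmu_vmalloc_psize below VMALLOC_END and mmu_io_psize above it. A compilable sketch of the same shape follows; the psize indices and sllp bit values are invented for the demo (the real ones live in the kernel's mmu_psize_defs[]).

#include <stdint.h>
#include <stdio.h>

/* Invented page-size indices and "sllp" encodings, for illustration only. */
enum psize { PSIZE_4K, PSIZE_64K, PSIZE_16M, PSIZE_MAX };

static const uint64_t sllp[PSIZE_MAX] = {
	[PSIZE_4K]  = 0x0000,
	[PSIZE_64K] = 0x0110,
	[PSIZE_16M] = 0x0100,
};

enum region { REGION_USER, REGION_VMALLOC, REGION_KERNEL };

static uint64_t slb_vsid(uint64_t base, enum region r, int io_mapping)
{
	enum psize psize;

	switch (r) {            /* each case only picks a page size... */
	case REGION_USER:    psize = PSIZE_64K; break;
	case REGION_VMALLOC: psize = io_mapping ? PSIZE_4K : PSIZE_64K; break;
	default:             psize = PSIZE_16M; break;
	}
	return base | sllp[psize];      /* ...translated to bits just once */
}

int main(void)
{
	printf("io mapping vsid flags: %#jx\n",
	       (uintmax_t)slb_vsid(0x1000, REGION_VMALLOC, 1));
	return 0;
}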
@@ -330,10 +388,6 @@ static void spu_free_irqs(struct spu *spu)
 	free_irq(spu->irqs[2], spu);
 }
 
-static struct list_head spu_list[MAX_NUMNODES];
-static LIST_HEAD(spu_full_list);
-static DEFINE_MUTEX(spu_mutex);
-
 static void spu_init_channels(struct spu *spu)
 {
 	static const struct {
@@ -593,6 +647,7 @@ static int __init create_spu(void *data)
 	struct spu *spu;
 	int ret;
 	static int number;
+	unsigned long flags;
 
 	ret = -ENOMEM;
 	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -620,8 +675,10 @@ static int __init create_spu(void *data)
 		goto out_free_irqs;
 
 	mutex_lock(&spu_mutex);
+	spin_lock_irqsave(&spu_list_lock, flags);
 	list_add(&spu->list, &spu_list[spu->node]);
 	list_add(&spu->full_list, &spu_full_list);
+	spin_unlock_irqrestore(&spu_list_lock, flags);
 	mutex_unlock(&spu_mutex);
 
 	goto out;
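Registration nests the new spinlock inside the existing spu_mutex: the mutex keeps serializing SPE creation, while the spinlock alone is what the interrupt-safe flush path contends on, so the list-manipulation window stays short. A tiny userspace model of that lock ordering is sketched below; pthread primitives are stand-ins and register_spu() is a name invented for the demo.

#include <pthread.h>

static pthread_mutex_t spu_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t spu_list_lock;  /* pthread_spin_init() at startup */

static void register_spu(void)
{
	pthread_mutex_lock(&spu_mutex);       /* slow path: serialize setup */
	pthread_spin_lock(&spu_list_lock);    /* short window for the lists */
	/* list_add(&spu->list, ...); list_add(&spu->full_list, ...); */
	pthread_spin_unlock(&spu_list_lock);
	pthread_mutex_unlock(&spu_mutex);
}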