author		Ian Munsie <imunsie@au1.ibm.com>	2014-10-08 04:54:51 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2014-10-08 05:14:55 -0400
commit		73d16a6e0e51990cbe13f8d8f43bd5329bbab30a (patch)
tree		d151865457d0653072ee8dd2fbc23fb84f05fe23
parent		e83d01697583d8610d1d62279758c2a881e3396f (diff)
powerpc/cell: Move data segment faulting code out of cell platform
__spu_trap_data_seg() currently contains code to determine the VSID and ESID
required for a particular EA and mm struct.

This code is generically useful for other co-processors. This moves the code
out of the cell platform so it can be used by other powerpc code. It also
adds 1TB segment handling, which Cell didn't support. The new function is
called copro_calculate_slb().

This also moves the internal struct spu_slb to a generic struct copro_slb,
which is now used in the Cell and copro code. We use this new struct instead
of passing around esid and vsid parameters.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
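As a usage sketch (not part of the patch): a co-processor driver outside the
cell platform would call the new helper much as __spu_trap_data_seg() does
below. Only copro_calculate_slb() and struct copro_slb come from this patch;
my_copro_handle_segment_fault() and my_copro_write_slbe() are hypothetical
stand-ins for driver-specific code.

	#include <asm/copro.h>

	/* Hypothetical device hook: a real driver would MMIO the entry here. */
	static void my_copro_write_slbe(u64 esid, u64 vsid)
	{
	}

	/* Hypothetical segment-fault handler for some co-processor unit. */
	static int my_copro_handle_segment_fault(struct mm_struct *mm, u64 ea)
	{
		struct copro_slb slb;
		int ret;

		/* Fills slb.esid/slb.vsid for ea; non-zero means invalid region. */
		ret = copro_calculate_slb(mm, ea, &slb);
		if (ret)
			return ret;

		/* Program the computed ESID/VSID pair into the unit's SLB. */
		my_copro_write_slbe(slb.esid, slb.vsid);

		return 0;
	}

Note that, unlike the old Cell-only path, the helper also handles 1TB
segments (see SLB_VSID_B_1T and the new slb_vsid_shift() in the diff below).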
-rw-r--r--	arch/powerpc/include/asm/copro.h	|  7
-rw-r--r--	arch/powerpc/include/asm/mmu-hash64.h	|  7
-rw-r--r--	arch/powerpc/mm/copro_fault.c	| 46
-rw-r--r--	arch/powerpc/mm/slb.c	|  3
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c	| 55
5 files changed, 69 insertions(+), 49 deletions(-)
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
index 51cae85a50b4..b0e6a183181f 100644
--- a/arch/powerpc/include/asm/copro.h
+++ b/arch/powerpc/include/asm/copro.h
@@ -10,7 +10,14 @@
 #ifndef _ASM_POWERPC_COPRO_H
 #define _ASM_POWERPC_COPRO_H
 
+struct copro_slb
+{
+	u64 esid, vsid;
+};
+
 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 			  unsigned long dsisr, unsigned *flt);
 
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
+
 #endif /* _ASM_POWERPC_COPRO_H */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 92bc3a637923..bd813c00ede2 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -190,6 +190,13 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 
 #ifndef __ASSEMBLY__
 
+static inline int slb_vsid_shift(int ssize)
+{
+	if (ssize == MMU_SEGSIZE_256M)
+		return SLB_VSID_SHIFT;
+	return SLB_VSID_SHIFT_1T;
+}
+
 static inline int segment_shift(int ssize)
 {
 	if (ssize == MMU_SEGSIZE_256M)
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index ba7df14c6b82..a15a23efc0e2 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <asm/reg.h>
+#include <asm/copro.h>
 
 /*
  * This ought to be kept in sync with the powerpc specific do_page_fault
@@ -90,3 +91,48 @@ out_unlock:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
+
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
+{
+	u64 vsid;
+	int psize, ssize;
+
+	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+	switch (REGION_ID(ea)) {
+	case USER_REGION_ID:
+		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
+		psize = get_slice_psize(mm, ea);
+		ssize = user_segment_size(ea);
+		vsid = get_vsid(mm->context.id, ea, ssize);
+		break;
+	case VMALLOC_REGION_ID:
+		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
+		if (ea < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
+		ssize = mmu_kernel_ssize;
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+		break;
+	case KERNEL_REGION_ID:
+		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
+		psize = mmu_linear_psize;
+		ssize = mmu_kernel_ssize;
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+		break;
+	default:
+		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
+		return 1;
+	}
+
+	vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;
+
+	vsid |= mmu_psize_defs[psize].sllp |
+		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
+
+	slb->vsid = vsid;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(copro_calculate_slb);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 0399a6702958..6e450ca66526 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
 	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
-#define slb_vsid_shift(ssize)	\
-	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
-
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 					 unsigned long flags)
 {
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 2930d1e81a05..ffcbd242e669 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -76,10 +76,6 @@ static LIST_HEAD(spu_full_list);
 static DEFINE_SPINLOCK(spu_full_list_lock);
 static DEFINE_MUTEX(spu_full_list_mutex);
 
-struct spu_slb {
-	u64 esid, vsid;
-};
-
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -149,7 +145,7 @@ static void spu_restart_dma(struct spu *spu)
 	}
 }
 
-static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
+static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 
@@ -167,45 +163,12 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 
 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
-	struct mm_struct *mm = spu->mm;
-	struct spu_slb slb;
-	int psize;
-
-	pr_debug("%s\n", __func__);
-
-	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
+	struct copro_slb slb;
+	int ret;
 
-	switch(REGION_ID(ea)) {
-	case USER_REGION_ID:
-#ifdef CONFIG_PPC_MM_SLICES
-		psize = get_slice_psize(mm, ea);
-#else
-		psize = mm->context.user_psize;
-#endif
-		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
-		break;
-	case VMALLOC_REGION_ID:
-		if (ea < VMALLOC_END)
-			psize = mmu_vmalloc_psize;
-		else
-			psize = mmu_io_psize;
-		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
-		break;
-	case KERNEL_REGION_ID:
-		psize = mmu_linear_psize;
-		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
-		break;
-	default:
-		/* Future: support kernel segments so that drivers
-		 * can use SPUs.
-		 */
-		pr_debug("invalid region access at %016lx\n", ea);
-		return 1;
-	}
-	slb.vsid |= mmu_psize_defs[psize].sllp;
+	ret = copro_calculate_slb(spu->mm, ea, &slb);
+	if (ret)
+		return ret;
 
 	spu_load_slb(spu, spu->slb_replace, &slb);
 
@@ -253,7 +216,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	return 0;
 }
 
-static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
+static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
 {
 	unsigned long ea = (unsigned long)addr;
 	u64 llp;
@@ -272,7 +235,7 @@ static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
  * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
  * address @new_addr is present.
  */
-static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
+static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
 				void *new_addr)
 {
 	unsigned long ea = (unsigned long)new_addr;
@@ -297,7 +260,7 @@ static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 			   void *code, int code_size)
 {
-	struct spu_slb slbs[4];
+	struct copro_slb slbs[4];
 	int i, nr_slbs = 0;
 	/* start and end addresses of both mappings */
 	void *addrs[] = {