author	Arnd Bergmann <arnd@arndb.de>	2006-01-04 14:31:30 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-01-08 23:44:49 -0500
commit	f0831acc4b78e2d9737e8ed91b8b7505b21ddb83 (patch)
tree	0c901e45cdc932776d3953cfcdf66015d6853bec /arch/powerpc
parent	ce8ab8541203f6c7be5b2eeaa97f14f1d8d44e4f (diff)
[PATCH] spufs: abstract priv1 register access.
In a hypervisor based setup, direct access to the first privileged
register space can typically not be allowed to the kernel and has to be
implemented through hypervisor calls. As suggested by Masato Noguchi,
let's abstract the register access through a number of function calls.

Since there is currently no public specification of actual hypervisor
calls to implement this, I only provide a place that makes it easier to
hook into.

Cc: Masato Noguchi <Masato.Noguchi@jp.sony.com>
Cc: Geoff Levand <geoff.levand@am.sony.com>
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
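Since no hypervisor call interface is public, the following is only a
hypothetical sketch of how a backend might hook in; hv_spu_int_mask_set()
is an invented name and appears nowhere in this patch. The idea is that a
hypervisor port would replace the MMIO accesses in spu_priv1.c with calls
along these lines:

	/* hypothetical backend: trap to the hypervisor instead of
	 * touching the priv1 MMIO block, which the kernel cannot map */
	void spu_int_mask_set(struct spu *spu, int class, u64 mask)
	{
		hv_spu_int_mask_set(spu->number, class, mask);
	}

Because every user of the priv1 register block now goes through the
spu_*() helpers, such a port only has to swap out spu_priv1.c, not every
caller.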
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/platforms/cell/Makefile	5
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c	51
-rw-r--r--	arch/powerpc/platforms/cell/spu_priv1.c	133
-rw-r--r--	arch/powerpc/platforms/cell/spufs/hw_ops.c	19
-rw-r--r--	arch/powerpc/platforms/cell/spufs/switch.c	129
5 files changed, 208 insertions(+), 129 deletions(-)
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index ebbd1141498..16031b565be 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -2,6 +2,9 @@ obj-y += interrupt.o iommu.o setup.o spider-pic.o
 obj-y += pervasive.o
 
 obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SPU_FS) += spufs/ spu_base.o
+obj-$(CONFIG_SPU_FS) += spufs/ spu-base.o
+
+spu-base-y += spu_base.o spu_priv1.o
+
 builtin-spufs-$(CONFIG_SPU_FS) += spu_syscalls.o
 obj-y += $(builtin-spufs-m)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index ae835474072..081b3dcbaf1 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -142,8 +142,7 @@ static int __spu_trap_mailbox(struct spu *spu)
 
 	/* atomically disable SPU mailbox interrupts */
 	spin_lock(&spu->register_lock);
-	out_be64(&spu->priv1->int_mask_class2_RW,
-		in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
+	spu_int_mask_and(spu, 2, ~0x1);
 	spin_unlock(&spu->register_lock);
 	return 0;
 }
@@ -180,8 +179,7 @@ static int __spu_trap_spubox(struct spu *spu)
 
 	/* atomically disable SPU mailbox interrupts */
 	spin_lock(&spu->register_lock);
-	out_be64(&spu->priv1->int_mask_class2_RW,
-		in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
+	spu_int_mask_and(spu, 2, ~0x10);
 	spin_unlock(&spu->register_lock);
 	return 0;
 }
@@ -206,8 +204,8 @@ spu_irq_class_0_bottom(struct spu *spu)
 
 	spu->class_0_pending = 0;
 
-	mask = in_be64(&spu->priv1->int_mask_class0_RW);
-	stat = in_be64(&spu->priv1->int_stat_class0_RW);
+	mask = spu_int_mask_get(spu, 0);
+	stat = spu_int_stat_get(spu, 0);
 
 	stat &= mask;
 
@@ -220,7 +218,7 @@ spu_irq_class_0_bottom(struct spu *spu)
 	if (stat & 4) /* error on SPU */
 		__spu_trap_error(spu);
 
-	out_be64(&spu->priv1->int_stat_class0_RW, stat);
+	spu_int_stat_clear(spu, 0, stat);
 
 	return (stat & 0x7) ? -EIO : 0;
 }
@@ -236,13 +234,13 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
 
 	/* atomically read & clear class1 status. */
 	spin_lock(&spu->register_lock);
-	mask = in_be64(&spu->priv1->int_mask_class1_RW);
-	stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
-	dar = in_be64(&spu->priv1->mfc_dar_RW);
-	dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
+	mask = spu_int_mask_get(spu, 1);
+	stat = spu_int_stat_get(spu, 1) & mask;
+	dar = spu_mfc_dar_get(spu);
+	dsisr = spu_mfc_dsisr_get(spu);
 	if (stat & 2) /* mapping fault */
-		out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
-	out_be64(&spu->priv1->int_stat_class1_RW, stat);
+		spu_mfc_dsisr_set(spu, 0ul);
+	spu_int_stat_clear(spu, 1, stat);
 	spin_unlock(&spu->register_lock);
 
 	if (stat & 1) /* segment fault */
@@ -270,8 +268,8 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
 	unsigned long mask;
 
 	spu = data;
-	stat = in_be64(&spu->priv1->int_stat_class2_RW);
-	mask = in_be64(&spu->priv1->int_mask_class2_RW);
+	stat = spu_int_stat_get(spu, 2);
+	mask = spu_int_mask_get(spu, 2);
 
 	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
@@ -292,7 +290,7 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
 	if (stat & 0x10) /* SPU mailbox threshold */
 		__spu_trap_spubox(spu);
 
-	out_be64(&spu->priv1->int_stat_class2_RW, stat);
+	spu_int_stat_clear(spu, 2, stat);
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -309,21 +307,18 @@ spu_request_irqs(struct spu *spu)
 			  spu_irq_class_0, 0, spu->irq_c0, spu);
 	if (ret)
 		goto out;
-	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
 
 	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
 	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
 			  spu_irq_class_1, 0, spu->irq_c1, spu);
 	if (ret)
 		goto out1;
-	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
 
 	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
 	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
 			  spu_irq_class_2, 0, spu->irq_c2, spu);
 	if (ret)
 		goto out2;
-	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
 	goto out;
 
 out2:
@@ -383,13 +378,6 @@ static void spu_init_channels(struct spu *spu)
 	}
 }
 
-static void spu_init_regs(struct spu *spu)
-{
-	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
-	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
-	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
-}
-
 struct spu *spu_alloc(void)
 {
 	struct spu *spu;
@@ -405,10 +393,8 @@ struct spu *spu_alloc(void)
 	}
 	up(&spu_mutex);
 
-	if (spu) {
+	if (spu)
 		spu_init_channels(spu);
-		spu_init_regs(spu);
-	}
 
 	return spu;
 }
@@ -579,8 +565,7 @@ static int __init spu_map_device(struct spu *spu, struct device_node *spe)
 		goto out_unmap;
 
 	spu->priv1= map_spe_prop(spe, "priv1");
-	if (!spu->priv1)
-		goto out_unmap;
+	/* priv1 is not available on a hypervisor */
 
 	spu->priv2= map_spe_prop(spe, "priv2");
 	if (!spu->priv2)
@@ -633,8 +618,8 @@ static int __init create_spu(struct device_node *spe)
 	spu->dsisr = 0UL;
 	spin_lock_init(&spu->register_lock);
 
-	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
-	out_be64(&spu->priv1->mfc_sr1_RW, 0x33);
+	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
+	spu_mfc_sr1_set(spu, 0x33);
 
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
diff --git a/arch/powerpc/platforms/cell/spu_priv1.c b/arch/powerpc/platforms/cell/spu_priv1.c
new file mode 100644
index 00000000000..b2656421c7b
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1.c
@@ -0,0 +1,133 @@
+/*
+ * access to SPU privileged registers
+ */
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/spu.h>
+
+void spu_int_mask_and(struct spu *spu, int class, u64 mask)
+{
+	u64 old_mask;
+
+	old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
+	out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_and);
+
+void spu_int_mask_or(struct spu *spu, int class, u64 mask)
+{
+	u64 old_mask;
+
+	old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
+	out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_or);
+
+void spu_int_mask_set(struct spu *spu, int class, u64 mask)
+{
+	out_be64(&spu->priv1->int_mask_RW[class], mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_set);
+
+u64 spu_int_mask_get(struct spu *spu, int class)
+{
+	return in_be64(&spu->priv1->int_mask_RW[class]);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_get);
+
+void spu_int_stat_clear(struct spu *spu, int class, u64 stat)
+{
+	out_be64(&spu->priv1->int_stat_RW[class], stat);
+}
+EXPORT_SYMBOL_GPL(spu_int_stat_clear);
+
+u64 spu_int_stat_get(struct spu *spu, int class)
+{
+	return in_be64(&spu->priv1->int_stat_RW[class]);
+}
+EXPORT_SYMBOL_GPL(spu_int_stat_get);
+
+void spu_int_route_set(struct spu *spu, u64 route)
+{
+	out_be64(&spu->priv1->int_route_RW, route);
+}
+EXPORT_SYMBOL_GPL(spu_int_route_set);
+
+u64 spu_mfc_dar_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->mfc_dar_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dar_get);
+
+u64 spu_mfc_dsisr_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->mfc_dsisr_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dsisr_get);
+
+void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr)
+{
+	out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dsisr_set);
+
+void spu_mfc_sdr_set(struct spu *spu, u64 sdr)
+{
+	out_be64(&spu->priv1->mfc_sdr_RW, sdr);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sdr_set);
+
+void spu_mfc_sr1_set(struct spu *spu, u64 sr1)
+{
+	out_be64(&spu->priv1->mfc_sr1_RW, sr1);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sr1_set);
+
+u64 spu_mfc_sr1_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->mfc_sr1_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sr1_get);
+
+void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
+{
+	out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_set);
+
+u64 spu_mfc_tclass_id_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->mfc_tclass_id_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_get);
+
+void spu_tlb_invalidate(struct spu *spu)
+{
+	out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
+}
+EXPORT_SYMBOL_GPL(spu_tlb_invalidate);
+
+void spu_resource_allocation_groupID_set(struct spu *spu, u64 id)
+{
+	out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_set);
+
+u64 spu_resource_allocation_groupID_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->resource_allocation_groupID_RW);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_get);
+
+void spu_resource_allocation_enable_set(struct spu *spu, u64 enable)
+{
+	out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_set);
+
+u64 spu_resource_allocation_enable_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->resource_allocation_enable_RW);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_get);
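The accessors above rely on struct spu_priv1 exposing the class 0-2
interrupt registers as arrays rather than three named fields. As a rough
sketch for orientation only, the fields they touch look roughly like the
following; the authoritative layout, including the architected register
offsets and padding elided here, lives in include/asm-powerpc/spu.h,
which is outside this diffstat:

	struct spu_priv1 {
		u64 int_mask_RW[3];	/* one interrupt mask per class */
		u64 int_stat_RW[3];	/* one interrupt status per class */
		u64 int_route_RW;
		u64 mfc_dar_RW;
		u64 mfc_dsisr_RW;
		u64 mfc_sdr_RW;
		u64 mfc_sr1_RW;
		u64 mfc_tclass_id_RW;
		u64 tlb_invalidate_entry_W;
		u64 resource_allocation_groupID_RW;
		u64 resource_allocation_enable_RW;
		/* ... further registers and padding omitted ... */
	};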
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index 9a53e29f9d7..5445719bff7 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -62,7 +62,6 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
 					  unsigned int events)
 {
 	struct spu *spu = ctx->spu;
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	int ret = 0;
 	u32 stat;
 
@@ -78,18 +77,16 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
 		if (stat & 0xff0000)
 			ret |= POLLIN | POLLRDNORM;
 		else {
-			out_be64(&priv1->int_stat_class2_RW, 0x1);
-			out_be64(&priv1->int_mask_class2_RW,
-				in_be64(&priv1->int_mask_class2_RW) | 0x1);
+			spu_int_stat_clear(spu, 2, 0x1);
+			spu_int_mask_or(spu, 2, 0x1);
 		}
 	}
 	if (events & (POLLOUT | POLLWRNORM)) {
 		if (stat & 0x00ff00)
 			ret = POLLOUT | POLLWRNORM;
 		else {
-			out_be64(&priv1->int_stat_class2_RW, 0x10);
-			out_be64(&priv1->int_mask_class2_RW,
-				in_be64(&priv1->int_mask_class2_RW) | 0x10);
+			spu_int_stat_clear(spu, 2, 0x10);
+			spu_int_mask_or(spu, 2, 0x10);
 		}
 	}
 	spin_unlock_irq(&spu->register_lock);
@@ -100,7 +97,6 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
 {
 	struct spu *spu = ctx->spu;
 	struct spu_problem __iomem *prob = spu->problem;
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 	int ret;
 
@@ -111,8 +107,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
 		ret = 4;
 	} else {
 		/* make sure we get woken up by the interrupt */
-		out_be64(&priv1->int_mask_class2_RW,
-			in_be64(&priv1->int_mask_class2_RW) | 0x1);
+		spu_int_mask_or(spu, 2, 0x1);
 		ret = 0;
 	}
 	spin_unlock_irq(&spu->register_lock);
@@ -123,7 +118,6 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
 {
 	struct spu *spu = ctx->spu;
 	struct spu_problem __iomem *prob = spu->problem;
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	int ret;
 
 	spin_lock_irq(&spu->register_lock);
@@ -134,8 +128,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
 	} else {
 		/* make sure we get woken up by the interrupt when space
 		   becomes available */
-		out_be64(&priv1->int_mask_class2_RW,
-			in_be64(&priv1->int_mask_class2_RW) | 0x10);
+		spu_int_mask_or(spu, 2, 0x10);
 		ret = 0;
 	}
 	spin_unlock_irq(&spu->register_lock);
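The hw_ops.c callers now follow a uniform pattern: clear any stale
class 2 status, then set the wanted mask bit, all under register_lock so
the read-modify-write of the mask cannot race with the interrupt
handlers in spu_base.c. A minimal sketch of that pattern, written as a
standalone function only for illustration (in spu_hw_ibox_read() the
lock already spans a larger critical section):

	/* re-arm the class 2 ibox interrupt (bit 0x1) */
	static void ibox_irq_rearm(struct spu *spu)
	{
		spin_lock_irq(&spu->register_lock);
		spu_int_stat_clear(spu, 2, 0x1);	/* drop stale status */
		spu_int_mask_or(spu, 2, 0x1);		/* enable the interrupt */
		spin_unlock_irq(&spu->register_lock);
	}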
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index de1ad146fc6..1061c12b2ed 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -108,8 +108,6 @@ static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
 
 static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Save, Step 3:
 	 * Restore, Step 2:
 	 * Save INT_Mask_class0 in CSA.
@@ -121,16 +119,13 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	 */
 	spin_lock_irq(&spu->register_lock);
 	if (csa) {
-		csa->priv1.int_mask_class0_RW =
-			in_be64(&priv1->int_mask_class0_RW);
-		csa->priv1.int_mask_class1_RW =
-			in_be64(&priv1->int_mask_class1_RW);
-		csa->priv1.int_mask_class2_RW =
-			in_be64(&priv1->int_mask_class2_RW);
+		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
+		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
+		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
 	}
-	out_be64(&priv1->int_mask_class0_RW, 0UL);
-	out_be64(&priv1->int_mask_class1_RW, 0UL);
-	out_be64(&priv1->int_mask_class2_RW, 0UL);
+	spu_int_mask_set(spu, 0, 0ul);
+	spu_int_mask_set(spu, 1, 0ul);
+	spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
 }
@@ -195,12 +190,10 @@ static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
 
 static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Save, Step 10:
 	 * Save MFC_SR1 in the CSA.
 	 */
-	csa->priv1.mfc_sr1_RW = in_be64(&priv1->mfc_sr1_RW);
+	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
 }
 
 static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
@@ -292,15 +285,13 @@ static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
 
 static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Save, Step 17:
 	 * Restore, Step 12.
 	 * Restore, Step 48.
 	 * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
 	 * Then issue a PPE sync instruction.
 	 */
-	out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
+	spu_tlb_invalidate(spu);
 	mb();
 }
 
@@ -410,25 +401,21 @@ static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
 
 static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Save, Step 25:
 	 * Save the MFC_TCLASS_ID register in
 	 * the CSA.
 	 */
-	csa->priv1.mfc_tclass_id_RW = in_be64(&priv1->mfc_tclass_id_RW);
+	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
 }
 
 static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Save, Step 26:
 	 * Restore, Step 23.
 	 * Write the MFC_TCLASS_ID register with
 	 * the value 0x10000000.
 	 */
-	out_be64(&priv1->mfc_tclass_id_RW, 0x10000000);
+	spu_mfc_tclass_id_set(spu, 0x10000000);
 	eieio();
 }
 
@@ -458,14 +445,13 @@ static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
 
 static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 	int i;
 
 	/* Save, Step 29:
 	 * If MFC_SR1[R]='1', save SLBs in CSA.
 	 */
-	if (in_be64(&priv1->mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) {
+	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
 		csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
 		for (i = 0; i < 8; i++) {
 			out_be64(&priv2->slb_index_W, i);
@@ -479,8 +465,6 @@ static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
 
 static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Save, Step 30:
 	 * Restore, Step 18:
 	 * Write MFC_SR1 with MFC_SR1[D=0,S=1] and
@@ -492,9 +476,9 @@ static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
 	 * MFC_SR1[Pr] bit is not set.
 	 *
 	 */
-	out_be64(&priv1->mfc_sr1_RW, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
+	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
 			MFC_STATE1_RELOCATE_MASK |
 			MFC_STATE1_BUS_TLBIE_MASK));
 }
 
 static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
@@ -571,16 +555,14 @@ static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
 
 static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Save, Step 38:
 	 * Save RA_GROUP_ID register and the
 	 * RA_ENABLE reigster in the CSA.
 	 */
 	csa->priv1.resource_allocation_groupID_RW =
-		in_be64(&priv1->resource_allocation_groupID_RW);
+		spu_resource_allocation_groupID_get(spu);
 	csa->priv1.resource_allocation_enable_RW =
-		in_be64(&priv1->resource_allocation_enable_RW);
+		spu_resource_allocation_enable_get(spu);
 }
 
 static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
@@ -698,14 +680,13 @@ static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
 
 static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 
 	/* Save, Step 45:
 	 * Restore, Step 19:
 	 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
 	 */
-	if (in_be64(&priv1->mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) {
+	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
 		out_be64(&priv2->slb_invalidate_all_W, 0UL);
 		eieio();
 	}
@@ -774,7 +755,6 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
 
 static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
 		CLASS1_ENABLE_STORAGE_FAULT_INTR;
 
@@ -787,12 +767,12 @@ static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
 	 * (translation) interrupts.
 	 */
 	spin_lock_irq(&spu->register_lock);
-	out_be64(&priv1->int_stat_class0_RW, ~(0UL));
-	out_be64(&priv1->int_stat_class1_RW, ~(0UL));
-	out_be64(&priv1->int_stat_class2_RW, ~(0UL));
-	out_be64(&priv1->int_mask_class0_RW, 0UL);
-	out_be64(&priv1->int_mask_class1_RW, class1_mask);
-	out_be64(&priv1->int_mask_class2_RW, 0UL);
+	spu_int_stat_clear(spu, 0, ~0ul);
+	spu_int_stat_clear(spu, 1, ~0ul);
+	spu_int_stat_clear(spu, 2, ~0ul);
+	spu_int_mask_set(spu, 0, 0ul);
+	spu_int_mask_set(spu, 1, class1_mask);
+	spu_int_mask_set(spu, 2, 0ul);
 	spin_unlock_irq(&spu->register_lock);
 }
 
@@ -930,7 +910,6 @@ static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
 
 static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	struct spu_problem __iomem *prob = spu->problem;
 	u32 mask = MFC_TAGID_TO_TAGMASK(0);
 	unsigned long flags;
@@ -947,14 +926,13 @@ static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
 	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
 
 	local_irq_save(flags);
-	out_be64(&priv1->int_stat_class0_RW, ~(0UL));
-	out_be64(&priv1->int_stat_class2_RW, ~(0UL));
+	spu_int_stat_clear(spu, 0, ~(0ul));
+	spu_int_stat_clear(spu, 2, ~(0ul));
 	local_irq_restore(flags);
 }
 
 static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	struct spu_problem __iomem *prob = spu->problem;
 	unsigned long flags;
 
@@ -967,8 +945,8 @@ static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
 	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 
 	local_irq_save(flags);
-	out_be64(&priv1->int_stat_class0_RW, ~(0UL));
-	out_be64(&priv1->int_stat_class2_RW, ~(0UL));
+	spu_int_stat_clear(spu, 0, ~(0ul));
+	spu_int_stat_clear(spu, 2, ~(0ul));
 	local_irq_restore(flags);
 }
 
@@ -1067,7 +1045,6 @@ static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
 static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
 {
 	struct spu_problem __iomem *prob = spu->problem;
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 
 	/* Restore, Step 10:
 	 * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
@@ -1076,8 +1053,8 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
 	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
 		if (in_be32(&prob->spu_status_R) &
 		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
-			out_be64(&priv1->mfc_sr1_RW,
+			spu_mfc_sr1_set(spu,
 					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
 			eieio();
 			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
 			eieio();
@@ -1088,8 +1065,8 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
 			    SPU_STATUS_ISOLATED_LOAD_STAUTUS)
 		    || (in_be32(&prob->spu_status_R) &
 			SPU_STATUS_ISOLATED_STATE)) {
-			out_be64(&priv1->mfc_sr1_RW,
+			spu_mfc_sr1_set(spu,
 					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
 			eieio();
 			out_be32(&prob->spu_runcntl_RW, 0x2);
 			eieio();
@@ -1257,16 +1234,14 @@ static inline void setup_spu_status_part2(struct spu_state *csa,
 
 static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Restore, Step 29:
 	 * Restore RA_GROUP_ID register and the
 	 * RA_ENABLE reigster from the CSA.
 	 */
-	out_be64(&priv1->resource_allocation_groupID_RW,
+	spu_resource_allocation_groupID_set(spu,
 		 csa->priv1.resource_allocation_groupID_RW);
-	out_be64(&priv1->resource_allocation_enable_RW,
+	spu_resource_allocation_enable_set(spu,
 		 csa->priv1.resource_allocation_enable_RW);
 }
 
 static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
@@ -1409,8 +1384,6 @@ static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
 
 static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Restore, Step 49:
 	 * Write INT_MASK_class0 with value of 0.
 	 * Write INT_MASK_class1 with value of 0.
@@ -1420,12 +1393,12 @@ static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
 	 * Write INT_STAT_class2 with value of -1.
 	 */
 	spin_lock_irq(&spu->register_lock);
-	out_be64(&priv1->int_mask_class0_RW, 0UL);
-	out_be64(&priv1->int_mask_class1_RW, 0UL);
-	out_be64(&priv1->int_mask_class2_RW, 0UL);
-	out_be64(&priv1->int_stat_class0_RW, ~(0UL));
-	out_be64(&priv1->int_stat_class1_RW, ~(0UL));
-	out_be64(&priv1->int_stat_class2_RW, ~(0UL));
+	spu_int_mask_set(spu, 0, 0ul);
+	spu_int_mask_set(spu, 1, 0ul);
+	spu_int_mask_set(spu, 2, 0ul);
+	spu_int_stat_clear(spu, 0, ~0ul);
+	spu_int_stat_clear(spu, 1, ~0ul);
+	spu_int_stat_clear(spu, 2, ~0ul);
 	spin_unlock_irq(&spu->register_lock);
 }
 
@@ -1522,12 +1495,10 @@ static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
 
 static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Restore, Step 56:
 	 * Restore the MFC_TCLASS_ID register from CSA.
 	 */
-	out_be64(&priv1->mfc_tclass_id_RW, csa->priv1.mfc_tclass_id_RW);
+	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
 	eieio();
 }
 
@@ -1689,7 +1660,6 @@ static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
 
 static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 	u64 dummy = 0UL;
 
@@ -1700,8 +1670,7 @@ static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
 	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
 		dummy = in_be64(&priv2->puint_mb_R);
 		eieio();
-		out_be64(&priv1->int_stat_class2_RW,
-			 CLASS2_ENABLE_MAILBOX_INTR);
+		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
 		eieio();
 	}
 }
@@ -1729,12 +1698,10 @@ static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
 
 static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Restore, Step 69:
 	 * Restore the MFC_SR1 register from CSA.
 	 */
-	out_be64(&priv1->mfc_sr1_RW, csa->priv1.mfc_sr1_RW);
+	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
 	eieio();
 }
 
@@ -1792,15 +1759,13 @@ static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
 
 static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
 {
-	struct spu_priv1 __iomem *priv1 = spu->priv1;
-
 	/* Restore, Step 75:
 	 * Re-enable SPU interrupts.
 	 */
 	spin_lock_irq(&spu->register_lock);
-	out_be64(&priv1->int_mask_class0_RW, csa->priv1.int_mask_class0_RW);
-	out_be64(&priv1->int_mask_class1_RW, csa->priv1.int_mask_class1_RW);
-	out_be64(&priv1->int_mask_class2_RW, csa->priv1.int_mask_class2_RW);
+	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
+	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
+	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
 	spin_unlock_irq(&spu->register_lock);
 }
 