Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c     | 81
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c   | 24
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c    |  4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  | 20
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h  |  6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c | 62
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c            |  2
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c    |  2
8 files changed, 96 insertions(+), 105 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index c43999a10deb..eba7a2641dce 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -38,8 +38,61 @@
 const struct spu_management_ops *spu_management_ops;
 const struct spu_priv1_ops *spu_priv1_ops;
 
+static struct list_head spu_list[MAX_NUMNODES];
+static LIST_HEAD(spu_full_list);
+static DEFINE_MUTEX(spu_mutex);
+static spinlock_t spu_list_lock = SPIN_LOCK_UNLOCKED;
+
 EXPORT_SYMBOL_GPL(spu_priv1_ops);
 
+void spu_invalidate_slbs(struct spu *spu)
+{
+        struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
+                out_be64(&priv2->slb_invalidate_all_W, 0UL);
+}
+EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
+
+/* This is called by the MM core when a segment size is changed, to
+ * request a flush of all the SPEs using a given mm
+ */
+void spu_flush_all_slbs(struct mm_struct *mm)
+{
+        struct spu *spu;
+        unsigned long flags;
+
+        spin_lock_irqsave(&spu_list_lock, flags);
+        list_for_each_entry(spu, &spu_full_list, full_list) {
+                if (spu->mm == mm)
+                        spu_invalidate_slbs(spu);
+        }
+        spin_unlock_irqrestore(&spu_list_lock, flags);
+}
+
+/* The hack below stinks... try to do something better one of
+ * these days... Does it even work properly with NR_CPUS == 1 ?
+ */
+static inline void mm_needs_global_tlbie(struct mm_struct *mm)
+{
+        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
+
+        /* Global TLBIE broadcast required with SPEs. */
+        __cpus_setall(&mm->cpu_vm_mask, nr);
+}
+
+void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&spu_list_lock, flags);
+        spu->mm = mm;
+        spin_unlock_irqrestore(&spu_list_lock, flags);
+        if (mm)
+                mm_needs_global_tlbie(mm);
+}
+EXPORT_SYMBOL_GPL(spu_associate_mm);
+
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
         pr_debug("%s\n", __FUNCTION__);
@@ -74,6 +127,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
         struct spu_priv2 __iomem *priv2 = spu->priv2;
         struct mm_struct *mm = spu->mm;
         u64 esid, vsid, llp;
+        int psize;
 
         pr_debug("%s\n", __FUNCTION__);
 
@@ -90,22 +144,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
         case USER_REGION_ID:
 #ifdef CONFIG_HUGETLB_PAGE
                 if (in_hugepage_area(mm->context, ea))
-                        llp = mmu_psize_defs[mmu_huge_psize].sllp;
+                        psize = mmu_huge_psize;
                 else
 #endif
-                        llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+                        psize = mm->context.user_psize;
                 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
-                                SLB_VSID_USER | llp;
+                                SLB_VSID_USER;
                 break;
         case VMALLOC_REGION_ID:
-                llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+                if (ea < VMALLOC_END)
+                        psize = mmu_vmalloc_psize;
+                else
+                        psize = mmu_io_psize;
                 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
-                        SLB_VSID_KERNEL | llp;
+                        SLB_VSID_KERNEL;
                 break;
         case KERNEL_REGION_ID:
-                llp = mmu_psize_defs[mmu_linear_psize].sllp;
+                psize = mmu_linear_psize;
                 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
-                        SLB_VSID_KERNEL | llp;
+                        SLB_VSID_KERNEL;
                 break;
         default:
                 /* Future: support kernel segments so that drivers
@@ -114,9 +171,10 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                 pr_debug("invalid region access at %016lx\n", ea);
                 return 1;
         }
+        llp = mmu_psize_defs[psize].sllp;
 
         out_be64(&priv2->slb_index_W, spu->slb_replace);
-        out_be64(&priv2->slb_vsid_RW, vsid);
+        out_be64(&priv2->slb_vsid_RW, vsid | llp);
         out_be64(&priv2->slb_esid_RW, esid);
 
         spu->slb_replace++;
@@ -330,10 +388,6 @@ static void spu_free_irqs(struct spu *spu)
                 free_irq(spu->irqs[2], spu);
 }
 
-static struct list_head spu_list[MAX_NUMNODES];
-static LIST_HEAD(spu_full_list);
-static DEFINE_MUTEX(spu_mutex);
-
 static void spu_init_channels(struct spu *spu)
 {
         static const struct {
@@ -593,6 +647,7 @@ static int __init create_spu(void *data)
         struct spu *spu;
         int ret;
         static int number;
+        unsigned long flags;
 
         ret = -ENOMEM;
         spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -620,8 +675,10 @@ static int __init create_spu(void *data)
                 goto out_free_irqs;
 
         mutex_lock(&spu_mutex);
+        spin_lock_irqsave(&spu_list_lock, flags);
         list_add(&spu->list, &spu_list[spu->node]);
         list_add(&spu->full_list, &spu_full_list);
+        spin_unlock_irqrestore(&spu_list_lock, flags);
         mutex_unlock(&spu_mutex);
 
         goto out;
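
The new spu_flush_all_slbs() walks spu_full_list under spu_list_lock, so it can be called from the MM core without taking spu_mutex. A minimal caller sketch, assuming the new helpers are declared in <asm/spu.h> like the other spu_* exports; example_segment_size_changed() is a hypothetical stand-in for the real MM-core hook, not part of this patch:

/* Hypothetical caller sketch -- illustrative only. */
#include <asm/spu.h>

static void example_segment_size_changed(struct mm_struct *mm)
{
        /* Once the page-size bookkeeping for this mm has been updated,
         * drop the SLB entries on every SPE that has the mm attached;
         * they are refaulted on demand through __spu_trap_data_seg().
         */
        spu_flush_all_slbs(mm);
}
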
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b00653d69c01..505266a568d4 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -63,8 +63,8 @@ static ssize_t
 spufs_mem_read(struct file *file, char __user *buffer,
                                 size_t size, loff_t *pos)
 {
-        int ret;
         struct spu_context *ctx = file->private_data;
+        ssize_t ret;
 
         spu_acquire(ctx);
         ret = __spufs_mem_read(ctx, buffer, size, pos);
@@ -74,25 +74,29 @@ spufs_mem_read(struct file *file, char __user *buffer,
 
 static ssize_t
 spufs_mem_write(struct file *file, const char __user *buffer,
-                                size_t size, loff_t *pos)
+                                size_t size, loff_t *ppos)
 {
         struct spu_context *ctx = file->private_data;
         char *local_store;
+        loff_t pos = *ppos;
         int ret;
 
-        size = min_t(ssize_t, LS_SIZE - *pos, size);
-        if (size <= 0)
+        if (pos < 0)
+                return -EINVAL;
+        if (pos > LS_SIZE)
                 return -EFBIG;
-        *pos += size;
+        if (size > LS_SIZE - pos)
+                size = LS_SIZE - pos;
 
         spu_acquire(ctx);
-
         local_store = ctx->ops->get_ls(ctx);
-        ret = copy_from_user(local_store + *pos - size,
-                                        buffer, size) ? -EFAULT : size;
-
+        ret = copy_from_user(local_store + pos, buffer, size);
         spu_release(ctx);
-        return ret;
+
+        if (ret)
+                return -EFAULT;
+        *ppos = pos + size;
+        return size;
 }
 
 static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
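
The spufs_mem_write() rewrite follows the usual bounds-checked write pattern: validate the offset before using it, clamp the length to the end of local store, and only advance *ppos once copy_from_user() has succeeded. A minimal sketch of the same shape over a generic fixed-size buffer; my_write, BUF_SIZE and backing_store are illustrative names, not part of the patch:

static ssize_t my_write(struct file *file, const char __user *buffer,
                        size_t size, loff_t *ppos)
{
        loff_t pos = *ppos;

        if (pos < 0)
                return -EINVAL;                 /* reject negative offsets */
        if (pos > BUF_SIZE)
                return -EFBIG;                  /* cannot write past a fixed-size buffer */
        if (size > BUF_SIZE - pos)
                size = BUF_SIZE - pos;          /* clamp to the remaining space */

        if (copy_from_user(backing_store + pos, buffer, size))
                return -EFAULT;                 /* failed/partial copy: leave the offset alone */

        *ppos = pos + size;                     /* advance only on success */
        return size;
}
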
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 353a8fa07ab8..f95a611ca362 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
         int ret;
         unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-        ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
+        ret = spu_acquire_runnable(ctx, 0);
         if (ret)
                 return ret;
 
@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
                 spu_release(ctx);
                 ret = spu_setup_isolated(ctx);
                 if (!ret)
-                        ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
+                        ret = spu_acquire_runnable(ctx, 0);
         }
 
         /* if userspace has set the runcntrl register (eg, to issue an
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2f25e68b4bac..39823cec0844 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -127,14 +127,6 @@ static void spu_remove_from_active_list(struct spu *spu)
         mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
-static inline void mm_needs_global_tlbie(struct mm_struct *mm)
-{
-        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
-
-        /* Global TLBIE broadcast required with SPEs. */
-        __cpus_setall(&mm->cpu_vm_mask, nr);
-}
-
 static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
 
 static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
@@ -167,8 +159,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
         ctx->spu = spu;
         ctx->ops = &spu_hw_ops;
         spu->pid = current->pid;
-        spu->mm = ctx->owner;
-        mm_needs_global_tlbie(spu->mm);
+        spu_associate_mm(spu, ctx->owner);
         spu->ibox_callback = spufs_ibox_callback;
         spu->wbox_callback = spufs_wbox_callback;
         spu->stop_callback = spufs_stop_callback;
@@ -205,7 +196,7 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
         spu->stop_callback = NULL;
         spu->mfc_callback = NULL;
         spu->dma_callback = NULL;
-        spu->mm = NULL;
+        spu_associate_mm(spu, NULL);
         spu->pid = 0;
         ctx->ops = &spu_backing_ops;
         ctx->spu = NULL;
@@ -263,7 +254,6 @@ static void spu_prio_wait(struct spu_context *ctx)
 {
         DEFINE_WAIT(wait);
 
-        set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
         prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
         if (!signal_pending(current)) {
                 mutex_unlock(&ctx->state_mutex);
@@ -272,7 +262,6 @@ static void spu_prio_wait(struct spu_context *ctx)
         }
         __set_current_state(TASK_RUNNING);
         remove_wait_queue(&ctx->stop_wq, &wait);
-        clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }
 
 /**
@@ -292,7 +281,7 @@ static void spu_reschedule(struct spu *spu)
         best = sched_find_first_bit(spu_prio->bitmap);
         if (best < MAX_PRIO) {
                 struct spu_context *ctx = spu_grab_context(best);
-                if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
+                if (ctx)
                         wake_up(&ctx->stop_wq);
         }
         spin_unlock(&spu_prio->runq_lock);
@@ -414,8 +403,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
                 }
 
                 spu_add_to_rq(ctx);
-                if (!(flags & SPU_ACTIVATE_NOWAKE))
-                        spu_prio_wait(ctx);
+                spu_prio_wait(ctx);
                 spu_del_from_rq(ctx);
         } while (!signal_pending(current));
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0c437891dfd5..5c4e47d69d79 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -41,7 +41,7 @@ struct spu_gang;
 
 /* ctx->sched_flags */
 enum {
-        SPU_SCHED_WAKE = 0,
+        SPU_SCHED_WAKE = 0, /* currently unused */
 };
 
 struct spu_context {
@@ -191,9 +191,7 @@ void spu_forget(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-enum {
-        SPU_ACTIVATE_NOWAKE = 1,
-};
+
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index c08981ff7fc6..fd91c73de34e 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -468,26 +468,6 @@ static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
                          MFC_CNTL_PURGE_DMA_COMPLETE);
 }
 
-static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
-{
-        struct spu_priv2 __iomem *priv2 = spu->priv2;
-        int i;
-
-        /* Save, Step 29:
-         *     If MFC_SR1[R]='1', save SLBs in CSA.
-         */
-        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
-                csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
-                for (i = 0; i < 8; i++) {
-                        out_be64(&priv2->slb_index_W, i);
-                        eieio();
-                        csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
-                        csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
-                        eieio();
-                }
-        }
-}
-
 static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
 {
         /* Save, Step 30:
@@ -708,20 +688,6 @@ static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
         out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
 }
 
-static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
-{
-        struct spu_priv2 __iomem *priv2 = spu->priv2;
-
-        /* Save, Step 45:
-         * Restore, Step 19:
-         *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
-         */
-        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
-                out_be64(&priv2->slb_invalidate_all_W, 0UL);
-                eieio();
-        }
-}
-
 static inline void get_kernel_slb(u64 ea, u64 slb[2])
 {
         u64 llp;
@@ -765,7 +731,7 @@ static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
          * MFC_SR1[R]=1 (in other words, assume that
          * translation is desired by OS environment).
          */
-        invalidate_slbs(csa, spu);
+        spu_invalidate_slbs(spu);
         get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
         get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
         load_mfc_slb(spu, code_slb, 0);
@@ -1718,27 +1684,6 @@ static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
         }
 }
 
-static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
-{
-        struct spu_priv2 __iomem *priv2 = spu->priv2;
-        int i;
-
-        /* Restore, Step 68:
-         *     If MFC_SR1[R]='1', restore SLBs from CSA.
-         */
-        if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) {
-                for (i = 0; i < 8; i++) {
-                        out_be64(&priv2->slb_index_W, i);
-                        eieio();
-                        out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]);
-                        out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]);
-                        eieio();
-                }
-                out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W);
-                eieio();
-        }
-}
-
 static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
 {
         /* Restore, Step 69:
@@ -1875,7 +1820,6 @@ static void save_csa(struct spu_state *prev, struct spu *spu)
         set_mfc_tclass_id(prev, spu);           /* Step 26. */
         purge_mfc_queue(prev, spu);             /* Step 27. */
         wait_purge_complete(prev, spu);         /* Step 28. */
-        save_mfc_slbs(prev, spu);               /* Step 29. */
         setup_mfc_sr1(prev, spu);               /* Step 30. */
         save_spu_npc(prev, spu);                /* Step 31. */
         save_spu_privcntl(prev, spu);           /* Step 32. */
@@ -1987,7 +1931,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
         reset_spu_privcntl(prev, spu);          /* Step 16. */
         reset_spu_lslr(prev, spu);              /* Step 17. */
         setup_mfc_sr1(prev, spu);               /* Step 18. */
-        invalidate_slbs(prev, spu);             /* Step 19. */
+        spu_invalidate_slbs(spu);               /* Step 19. */
         reset_ch_part1(prev, spu);              /* Step 20. */
         reset_ch_part2(prev, spu);              /* Step 21. */
         enable_interrupts(prev, spu);           /* Step 22. */
@@ -2055,7 +1999,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu)
         restore_spu_mb(next, spu);              /* Step 65. */
         check_ppu_mb_stat(next, spu);           /* Step 66. */
         check_ppuint_mb_stat(next, spu);        /* Step 67. */
-        restore_mfc_slbs(next, spu);            /* Step 68. */
+        spu_invalidate_slbs(spu);               /* Modified Step 68. */
         restore_mfc_sr1(next, spu);             /* Step 69. */
         restore_other_spu_access(next, spu);    /* Step 70. */
         restore_spu_runcntl(next, spu);         /* Step 71. */
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 42354de3f557..2014d2b44449 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -294,7 +294,7 @@ static int __init ps3_mm_add_memory(void)
         unsigned long nr_pages;
 
         if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
-                return 0;
+                return -ENODEV;
 
         BUG_ON(!mem_init_done);
 
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index a9f7e4a39a2a..3c48cce98a5c 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -172,7 +172,7 @@ int __init ps3_system_bus_init(void)
         int result;
 
         if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
-                return 0;
+                return -ENODEV;
 
         result = bus_register(&ps3_system_bus_type);
         BUG_ON(result);
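
The two PS3 hunks change the early-exit path from return 0 to return -ENODEV, so an initcall that cannot run on the current firmware reports failure instead of silent success. A minimal sketch of that pattern, assuming only the firmware_has_feature()/FW_FEATURE_PS3_LV1 check seen above; example_ps3_init() is an illustrative name, not code from this patch:

static int __init example_ps3_init(void)
{
        /* Not running on PS3 LV1 firmware: report "no such device"
         * rather than pretending the subsystem initialised.
         */
        if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
                return -ENODEV;

        /* ... real setup work would go here ... */
        return 0;
}
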