aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJeremy Kerr <jk@ozlabs.org>2007-12-20 02:39:59 -0500
committerPaul Mackerras <paulus@samba.org>2007-12-21 03:46:19 -0500
commit7cd58e43810852eeb7af5a0c803f3890bd08b581 (patch)
treed9ea5c0102d70c26c4a9b18aaf4db4e3b6d48fc1
parent9b1d21f858e8bad750ab19cac23dcbf79d099be3 (diff)
[POWERPC] spufs: move fault, lscsa_alloc and switch code to spufs module
Currently, part of the spufs code (switch.o, lscsa_alloc.o and fault.o) is compiled directly into the kernel. This change moves these components of spufs into the spufs module. The lscsa and switch objects are fairly straightforward to move in. For the fault.o module, we split the fault-handling code into two parts: a/p/p/c/spu_fault.c and a/p/p/c/spufs/fault.c. The former is for the in-kernel spu_handle_mm_fault function, and we move the rest of the fault-handling code into spufs. Signed-off-by: Jeremy Kerr <jk@ozlabs.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--arch/powerpc/platforms/cell/Makefile2
-rw-r--r--arch/powerpc/platforms/cell/spu_fault.c98
-rw-r--r--arch/powerpc/platforms/cell/spufs/Makefile2
-rw-r--r--arch/powerpc/platforms/cell/spufs/fault.c71
-rw-r--r--arch/powerpc/platforms/cell/spufs/lscsa_alloc.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c32
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h47
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c4
-rw-r--r--include/asm-powerpc/spu.h2
-rw-r--r--include/asm-powerpc/spu_csa.h9
10 files changed, 149 insertions, 120 deletions
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 61d12f183036..3cd565a04d0a 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -19,7 +19,7 @@ spu-manage-$(CONFIG_PPC_CELLEB) += spu_manage.o
19spu-manage-$(CONFIG_PPC_CELL_NATIVE) += spu_manage.o 19spu-manage-$(CONFIG_PPC_CELL_NATIVE) += spu_manage.o
20 20
21obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ 21obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
22 spu_syscalls.o \ 22 spu_syscalls.o spu_fault.o \
23 $(spu-priv1-y) \ 23 $(spu-priv1-y) \
24 $(spu-manage-y) \ 24 $(spu-manage-y) \
25 spufs/ 25 spufs/
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
new file mode 100644
index 000000000000..c8b1cd42905d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -0,0 +1,98 @@
1/*
2 * SPU mm fault handler
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 * Author: Jeremy Kerr <jk@ozlabs.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23#include <linux/sched.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26
27#include <asm/spu.h>
28#include <asm/spu_csa.h>
29
30/*
31 * This ought to be kept in sync with the powerpc specific do_page_fault
32 * function. Currently, there are a few corner cases that we haven't had
33 * to handle fortunately.
34 */
35int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
36 unsigned long dsisr, unsigned *flt)
37{
38 struct vm_area_struct *vma;
39 unsigned long is_write;
40 int ret;
41
42#if 0
43 if (!IS_VALID_EA(ea)) {
44 return -EFAULT;
45 }
46#endif /* XXX */
47 if (mm == NULL) {
48 return -EFAULT;
49 }
50 if (mm->pgd == NULL) {
51 return -EFAULT;
52 }
53
54 down_read(&mm->mmap_sem);
55 vma = find_vma(mm, ea);
56 if (!vma)
57 goto bad_area;
58 if (vma->vm_start <= ea)
59 goto good_area;
60 if (!(vma->vm_flags & VM_GROWSDOWN))
61 goto bad_area;
62 if (expand_stack(vma, ea))
63 goto bad_area;
64good_area:
65 is_write = dsisr & MFC_DSISR_ACCESS_PUT;
66 if (is_write) {
67 if (!(vma->vm_flags & VM_WRITE))
68 goto bad_area;
69 } else {
70 if (dsisr & MFC_DSISR_ACCESS_DENIED)
71 goto bad_area;
72 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
73 goto bad_area;
74 }
75 ret = 0;
76 *flt = handle_mm_fault(mm, vma, ea, is_write);
77 if (unlikely(*flt & VM_FAULT_ERROR)) {
78 if (*flt & VM_FAULT_OOM) {
79 ret = -ENOMEM;
80 goto bad_area;
81 } else if (*flt & VM_FAULT_SIGBUS) {
82 ret = -EFAULT;
83 goto bad_area;
84 }
85 BUG();
86 }
87 if (*flt & VM_FAULT_MAJOR)
88 current->maj_flt++;
89 else
90 current->min_flt++;
91 up_read(&mm->mmap_sem);
92 return ret;
93
94bad_area:
95 up_read(&mm->mmap_sem);
96 return -EFAULT;
97}
98EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index 328afcf89503..d3a349fb42e5 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,8 +1,8 @@
1obj-y += switch.o fault.o lscsa_alloc.o
2 1
3obj-$(CONFIG_SPU_FS) += spufs.o 2obj-$(CONFIG_SPU_FS) += spufs.o
4spufs-y += inode.o file.o context.o syscalls.o coredump.o 3spufs-y += inode.o file.o context.o syscalls.o coredump.o
5spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o 4spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
5spufs-y += switch.o fault.o lscsa_alloc.o
6 6
7# Rules to build switch.o with the help of SPU tool chain 7# Rules to build switch.o with the help of SPU tool chain
8SPU_CROSS := spu- 8SPU_CROSS := spu-
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 917eab4be486..0635f292ae19 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -28,75 +28,6 @@
28 28
29#include "spufs.h" 29#include "spufs.h"
30 30
31/*
32 * This ought to be kept in sync with the powerpc specific do_page_fault
33 * function. Currently, there are a few corner cases that we haven't had
34 * to handle fortunately.
35 */
36static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
37 unsigned long dsisr, unsigned *flt)
38{
39 struct vm_area_struct *vma;
40 unsigned long is_write;
41 int ret;
42
43#if 0
44 if (!IS_VALID_EA(ea)) {
45 return -EFAULT;
46 }
47#endif /* XXX */
48 if (mm == NULL) {
49 return -EFAULT;
50 }
51 if (mm->pgd == NULL) {
52 return -EFAULT;
53 }
54
55 down_read(&mm->mmap_sem);
56 vma = find_vma(mm, ea);
57 if (!vma)
58 goto bad_area;
59 if (vma->vm_start <= ea)
60 goto good_area;
61 if (!(vma->vm_flags & VM_GROWSDOWN))
62 goto bad_area;
63 if (expand_stack(vma, ea))
64 goto bad_area;
65good_area:
66 is_write = dsisr & MFC_DSISR_ACCESS_PUT;
67 if (is_write) {
68 if (!(vma->vm_flags & VM_WRITE))
69 goto bad_area;
70 } else {
71 if (dsisr & MFC_DSISR_ACCESS_DENIED)
72 goto bad_area;
73 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
74 goto bad_area;
75 }
76 ret = 0;
77 *flt = handle_mm_fault(mm, vma, ea, is_write);
78 if (unlikely(*flt & VM_FAULT_ERROR)) {
79 if (*flt & VM_FAULT_OOM) {
80 ret = -ENOMEM;
81 goto bad_area;
82 } else if (*flt & VM_FAULT_SIGBUS) {
83 ret = -EFAULT;
84 goto bad_area;
85 }
86 BUG();
87 }
88 if (*flt & VM_FAULT_MAJOR)
89 current->maj_flt++;
90 else
91 current->min_flt++;
92 up_read(&mm->mmap_sem);
93 return ret;
94
95bad_area:
96 up_read(&mm->mmap_sem);
97 return -EFAULT;
98}
99
100static void spufs_handle_dma_error(struct spu_context *ctx, 31static void spufs_handle_dma_error(struct spu_context *ctx,
101 unsigned long ea, int type) 32 unsigned long ea, int type)
102{ 33{
@@ -138,7 +69,6 @@ void spufs_dma_callback(struct spu *spu, int type)
138{ 69{
139 spufs_handle_dma_error(spu->ctx, spu->dar, type); 70 spufs_handle_dma_error(spu->ctx, spu->dar, type);
140} 71}
141EXPORT_SYMBOL_GPL(spufs_dma_callback);
142 72
143/* 73/*
144 * bottom half handler for page faults, we can't do this from 74 * bottom half handler for page faults, we can't do this from
@@ -227,4 +157,3 @@ int spufs_handle_class1(struct spu_context *ctx)
227 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); 157 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
228 return ret; 158 return ret;
229} 159}
230EXPORT_SYMBOL_GPL(spufs_handle_class1);
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index d606e575a204..0e9f325c9ff7 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -28,6 +28,8 @@
28#include <asm/spu_csa.h> 28#include <asm/spu_csa.h>
29#include <asm/mmu.h> 29#include <asm/mmu.h>
30 30
31#include "spufs.h"
32
31static int spu_alloc_lscsa_std(struct spu_state *csa) 33static int spu_alloc_lscsa_std(struct spu_state *csa)
32{ 34{
33 struct spu_lscsa *lscsa; 35 struct spu_lscsa *lscsa;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 0117eb8f6a91..ee80de07c0bc 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -892,6 +892,38 @@ static int spusched_thread(void *unused)
892 return 0; 892 return 0;
893} 893}
894 894
895void spuctx_switch_state(struct spu_context *ctx,
896 enum spu_utilization_state new_state)
897{
898 unsigned long long curtime;
899 signed long long delta;
900 struct timespec ts;
901 struct spu *spu;
902 enum spu_utilization_state old_state;
903
904 ktime_get_ts(&ts);
905 curtime = timespec_to_ns(&ts);
906 delta = curtime - ctx->stats.tstamp;
907
908 WARN_ON(!mutex_is_locked(&ctx->state_mutex));
909 WARN_ON(delta < 0);
910
911 spu = ctx->spu;
912 old_state = ctx->stats.util_state;
913 ctx->stats.util_state = new_state;
914 ctx->stats.tstamp = curtime;
915
916 /*
917 * Update the physical SPU utilization statistics.
918 */
919 if (spu) {
920 ctx->stats.times[old_state] += delta;
921 spu->stats.times[old_state] += delta;
922 spu->stats.util_state = new_state;
923 spu->stats.tstamp = curtime;
924 }
925}
926
895#define LOAD_INT(x) ((x) >> FSHIFT) 927#define LOAD_INT(x) ((x) >> FSHIFT)
896#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) 928#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
897 929
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 5e92ad32cc9c..cce50f317c78 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -307,41 +307,16 @@ struct spufs_coredump_reader {
307extern struct spufs_coredump_reader spufs_coredump_read[]; 307extern struct spufs_coredump_reader spufs_coredump_read[];
308extern int spufs_coredump_num_notes; 308extern int spufs_coredump_num_notes;
309 309
310/* 310extern int spu_init_csa(struct spu_state *csa);
311 * This function is a little bit too large for an inline, but 311extern void spu_fini_csa(struct spu_state *csa);
312 * as fault.c is built into the kernel we can't move it out of 312extern int spu_save(struct spu_state *prev, struct spu *spu);
313 * line. 313extern int spu_restore(struct spu_state *new, struct spu *spu);
314 */ 314extern int spu_switch(struct spu_state *prev, struct spu_state *new,
315static inline void spuctx_switch_state(struct spu_context *ctx, 315 struct spu *spu);
316 enum spu_utilization_state new_state) 316extern int spu_alloc_lscsa(struct spu_state *csa);
317{ 317extern void spu_free_lscsa(struct spu_state *csa);
318 unsigned long long curtime; 318
319 signed long long delta; 319extern void spuctx_switch_state(struct spu_context *ctx,
320 struct timespec ts; 320 enum spu_utilization_state new_state);
321 struct spu *spu;
322 enum spu_utilization_state old_state;
323
324 ktime_get_ts(&ts);
325 curtime = timespec_to_ns(&ts);
326 delta = curtime - ctx->stats.tstamp;
327
328 WARN_ON(!mutex_is_locked(&ctx->state_mutex));
329 WARN_ON(delta < 0);
330
331 spu = ctx->spu;
332 old_state = ctx->stats.util_state;
333 ctx->stats.util_state = new_state;
334 ctx->stats.tstamp = curtime;
335
336 /*
337 * Update the physical SPU utilization statistics.
338 */
339 if (spu) {
340 ctx->stats.times[old_state] += delta;
341 spu->stats.times[old_state] += delta;
342 spu->stats.util_state = new_state;
343 spu->stats.tstamp = curtime;
344 }
345}
346 321
347#endif 322#endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 8cbc6574820f..c9600e8e0e16 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -48,6 +48,8 @@
48#include <asm/spu_csa.h> 48#include <asm/spu_csa.h>
49#include <asm/mmu_context.h> 49#include <asm/mmu_context.h>
50 50
51#include "spufs.h"
52
51#include "spu_save_dump.h" 53#include "spu_save_dump.h"
52#include "spu_restore_dump.h" 54#include "spu_restore_dump.h"
53 55
@@ -2187,10 +2189,8 @@ int spu_init_csa(struct spu_state *csa)
2187 2189
2188 return 0; 2190 return 0;
2189} 2191}
2190EXPORT_SYMBOL_GPL(spu_init_csa);
2191 2192
2192void spu_fini_csa(struct spu_state *csa) 2193void spu_fini_csa(struct spu_state *csa)
2193{ 2194{
2194 spu_free_lscsa(csa); 2195 spu_free_lscsa(csa);
2195} 2196}
2196EXPORT_SYMBOL_GPL(spu_fini_csa);
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 90aadf5bed2c..543c83c2dc62 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -283,6 +283,8 @@ void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
283int spu_add_sysdev_attr_group(struct attribute_group *attrs); 283int spu_add_sysdev_attr_group(struct attribute_group *attrs);
284void spu_remove_sysdev_attr_group(struct attribute_group *attrs); 284void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
285 285
286int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
287 unsigned long dsisr, unsigned *flt);
286 288
287/* 289/*
288 * Notifier blocks: 290 * Notifier blocks:
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 867bc2667330..34da5c685170 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -259,15 +259,6 @@ struct spu_state {
259 spinlock_t register_lock; 259 spinlock_t register_lock;
260}; 260};
261 261
262extern int spu_init_csa(struct spu_state *csa);
263extern void spu_fini_csa(struct spu_state *csa);
264extern int spu_save(struct spu_state *prev, struct spu *spu);
265extern int spu_restore(struct spu_state *new, struct spu *spu);
266extern int spu_switch(struct spu_state *prev, struct spu_state *new,
267 struct spu *spu);
268extern int spu_alloc_lscsa(struct spu_state *csa);
269extern void spu_free_lscsa(struct spu_state *csa);
270
271#endif /* !__SPU__ */ 262#endif /* !__SPU__ */
272#endif /* __KERNEL__ */ 263#endif /* __KERNEL__ */
273#endif /* !__ASSEMBLY__ */ 264#endif /* !__ASSEMBLY__ */