author    Arnd Bergmann <arnd@arndb.de>  2005-11-15 15:53:48 -0500
committer Paul Mackerras <paulus@samba.org>  2006-01-08 22:49:12 -0500
commit    67207b9664a8d603138ef1556141e6d0a102bea7 (patch)
tree      e98886778be65aeb6625a5f516873bbc5beeb978
parent    d7a301033f1990188f65abf4fe8e5b90ef0e3888 (diff)
[PATCH] spufs: The SPU file system, base
This is the current version of the SPU file system, used for driving SPEs on the Cell Broadband Engine. This release is almost identical to the version for the 2.6.14 kernel posted earlier, which is available as part of the Cell BE Linux distribution from http://www.bsc.es/projects/deepcomputing/linuxoncell/.

The first patch provides all the interfaces for running SPU applications, but does not have any support for debugging SPU tasks or for scheduling. Both of these functionalities are added in the subsequent patches.

See Documentation/filesystems/spufs.txt on how to use spufs.

Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r-- Documentation/filesystems/spufs.txt          | 521
-rw-r--r-- arch/powerpc/Kconfig                         |   1
-rw-r--r-- arch/powerpc/kernel/systbl.S                 |   2
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c              |   1
-rw-r--r-- arch/powerpc/platforms/cell/Kconfig          |  13
-rw-r--r-- arch/powerpc/platforms/cell/Makefile         |   3
-rw-r--r-- arch/powerpc/platforms/cell/spu_base.c       | 740
-rw-r--r-- arch/powerpc/platforms/cell/spu_syscalls.c   |  86
-rw-r--r-- arch/powerpc/platforms/cell/spufs/Makefile   |   3
-rw-r--r-- arch/powerpc/platforms/cell/spufs/context.c  |  67
-rw-r--r-- arch/powerpc/platforms/cell/spufs/file.c     | 596
-rw-r--r-- arch/powerpc/platforms/cell/spufs/inode.c    | 470
-rw-r--r-- arch/powerpc/platforms/cell/spufs/spufs.h    |  71
-rw-r--r-- arch/powerpc/platforms/cell/spufs/syscalls.c | 106
-rw-r--r-- arch/ppc/kernel/ppc_ksyms.c                  |   1
-rw-r--r-- include/asm-powerpc/spu.h                    | 498
-rw-r--r-- include/asm-powerpc/unistd.h                 |   2
-rw-r--r-- include/linux/syscalls.h                     |   5
-rw-r--r-- kernel/sys_ni.c                              |   2
-rw-r--r-- mm/memory.c                                  |   2
20 files changed, 3189 insertions, 1 deletions
diff --git a/Documentation/filesystems/spufs.txt b/Documentation/filesystems/spufs.txt
new file mode 100644
index 000000000000..8edc3952eff4
--- /dev/null
+++ b/Documentation/filesystems/spufs.txt
@@ -0,0 +1,521 @@
1SPUFS(2) Linux Programmer's Manual SPUFS(2)
2
3
4
5NAME
6 spufs - the SPU file system
7
8
9DESCRIPTION
10 The SPU file system is used on PowerPC machines that implement the Cell
11 Broadband Engine Architecture in order to access Synergistic Processor
12 Units (SPUs).
13
14 The file system provides a name space similar to POSIX shared memory or
15 message queues. Users that have write permissions on the file system
16 can use spu_create(2) to establish SPU contexts in the spufs root.
17
18 Every SPU context is represented by a directory containing a predefined
19 set of files. These files can be used for manipulating the state of the
20 logical SPU. Users can change permissions on those files, but not actu-
21 ally add or remove files.
22
23
24MOUNT OPTIONS
25 uid=<uid>
26 set the user owning the mount point, the default is 0 (root).
27
28 gid=<gid>
29 set the group owning the mount point, the default is 0 (root).
30
31
32FILES
33 The files in spufs mostly follow the standard behavior for regular sys-
34 tem calls like read(2) or write(2), but often support only a subset of
35 the operations supported on regular file systems. This list details the
36 supported operations and the deviations from the behaviour in the
37 respective man pages.
38
39 All files that support the read(2) operation also support readv(2) and
40 all files that support the write(2) operation also support writev(2).
41 All files support the access(2) and stat(2) family of operations, but
42 only the st_mode, st_nlink, st_uid and st_gid fields of struct stat
43 contain reliable information.
44
45 All files support the chmod(2)/fchmod(2) and chown(2)/fchown(2) opera-
46 tions, but will not be able to grant permissions that contradict the
47 possible operations, e.g. read access on the wbox file.
48
49 The current set of files is:
50
51
52 /mem
53 the contents of the local storage memory of the SPU. This can be
54 accessed like a regular shared memory file and contains both code and
55 data in the address space of the SPU. The possible operations on an
56 open mem file are:
57
58 read(2), pread(2), write(2), pwrite(2), lseek(2)
59 These operate as documented, with the exception that lseek(2),
60 write(2) and pwrite(2) are not supported beyond the end of the
61 file. The file size is the size of the local storage of the SPU,
62 which normally is 256 kilobytes.
63
64 mmap(2)
65 Mapping mem into the process address space gives access to the
66 SPU local storage within the process address space. Only
67 MAP_SHARED mappings are allowed.
68
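For illustration only, not part of the interface definition, a mapping of the local store might be set up as in the following sketch. The context path /spu/myctx and the 256 kilobyte size are assumptions for the example.

/* Hypothetical sketch: map the local store of an existing SPU context. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/spu/myctx/mem", O_RDWR);      /* example path */
	if (fd < 0) { perror("open"); return 1; }

	/* The local store is normally 256 KiB; only MAP_SHARED is allowed. */
	void *ls = mmap(NULL, 256 * 1024, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	if (ls == MAP_FAILED) { perror("mmap"); return 1; }
	return 0;
}
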
69
70 /mbox
71 The first SPU to CPU communication mailbox. This file is read-only and
72 can be read in units of 32 bits. The file can only be used in non-
73 blocking mode, and even poll(2) will not block on it. The possible
74 operations on an open mbox file are:
75
76 read(2)
77 If a count smaller than four is requested, read returns -1 and
78 sets errno to EINVAL. If there is no data available in the mail
79 box, the return value is set to -1 and errno becomes EAGAIN.
80 When data has been read successfully, four bytes are placed in
81 the data buffer and the value four is returned.
82
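A minimal sketch of a non-blocking mailbox read follows; the context path is an arbitrary example, not something the file system prescribes.

/* Sketch: poll the SPU-to-CPU mailbox without blocking. */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t word;
	int fd = open("/spu/myctx/mbox", O_RDONLY);   /* example path */
	if (fd < 0) { perror("open"); return 1; }

	ssize_t n = read(fd, &word, 4);               /* always 4 bytes */
	if (n == 4)
		printf("mailbox word: 0x%08x\n", word);
	else if (n < 0 && errno == EAGAIN)
		printf("mailbox empty\n");
	return 0;
}
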
83
84 /ibox
85 The second SPU to CPU communication mailbox. This file is similar to
86 the first mailbox file, but can be read in blocking I/O mode, and the
87 poll family of system calls can be used to wait for it. The possible
88 operations on an open ibox file are:
89
90 read(2)
91 If a count smaller than four is requested, read returns -1 and
92 sets errno to EINVAL. If there is no data available in the mail
93 box and the file descriptor has been opened with O_NONBLOCK, the
94 return value is set to -1 and errno becomes EAGAIN.
95
96 If there is no data available in the mail box and the file
97 descriptor has been opened without O_NONBLOCK, the call will
98 block until the SPU writes to its interrupt mailbox channel.
99 When data has been read successfully, four bytes are placed in
100 the data buffer and the value four is returned.
101
102 poll(2)
103 Poll on the ibox file returns (POLLIN | POLLRDNORM) whenever
104 data is available for reading.
105
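A sketch of waiting for interrupt-mailbox data with poll(2) is shown below; the path is an example only.

/* Sketch: wait for ibox data with poll(2), then read one word. */
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t word;
	int fd = open("/spu/myctx/ibox", O_RDONLY);   /* example path */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd >= 0 && poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
		if (read(fd, &word, 4) == 4)          /* blocking read also works */
			printf("ibox word: 0x%08x\n", word);
	}
	return 0;
}
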
106
107 /wbox
108 The CPU to SPU communication mailbox. It is write-only and can be written
109 in units of 32 bits. If the mailbox is full, write() will block and
110 poll can be used to wait for it becoming empty again. The possible
111 operations on an open wbox file are: write(2) If a count smaller than
112 four is requested, write returns -1 and sets errno to EINVAL. If there
113 is no space available in the mail box and the file descriptor has been
114 opened with O_NONBLOCK, the return value is set to -1 and errno becomes
115 EAGAIN.
116
117 If there is no space available in the mail box and the file descriptor
118 has been opened without O_NONBLOCK, the call will block until the SPU
119 reads from its PPE mailbox channel. When data has been written suc-
120 cessfully, four bytes are copied from the data buffer and the value
121 four is returned.
122
123 poll(2)
124 Poll on the wbox file returns (POLLOUT | POLLWRNORM) whenever
125 space is available for writing.
126
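As an illustrative sketch (the path is an assumption), a single word can be sent to the SPU like this:

/* Sketch: send one word to the SPU, blocking until there is room. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t word = 0x1234;
	int fd = open("/spu/myctx/wbox", O_WRONLY);   /* example path */
	if (fd < 0) { perror("open"); return 1; }

	if (write(fd, &word, 4) != 4)                 /* blocks while full */
		perror("write");
	return 0;
}
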
127
128 /mbox_stat
129 /ibox_stat
130 /wbox_stat
131 Read-only files that contain the length of the current queue, i.e. how
132 many words can be read from mbox or ibox or how many words can be
133 written to wbox without blocking. The files can be read only in 4-byte
134 units and return a big-endian binary integer number. The possible
135 operations on an open *box_stat file are:
136
137 read(2)
138 If a count smaller than four is requested, read returns -1 and
139 sets errno to EINVAL. Otherwise, a four byte value is placed in
140 the data buffer, containing the number of elements that can be
141 read from (for mbox_stat and ibox_stat) or written to (for
142 wbox_stat) the respective mail box without blocking or resulting
143 in EAGAIN.
144
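For illustration, a status query might look like the following sketch; the path is an example and the value is read in native (big-endian) byte order on the Cell.

/* Sketch: read how many mbox words can be read without blocking. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t count;
	int fd = open("/spu/myctx/mbox_stat", O_RDONLY);  /* example path */
	if (fd < 0) { perror("open"); return 1; }

	/* Big-endian binary word; native byte order on the Cell. */
	if (read(fd, &count, 4) == 4)
		printf("%u words pending in mbox\n", count);
	return 0;
}
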
145
146 /npc
147 /decr
148 /decr_status
149 /spu_tag_mask
150 /event_mask
151 /srr0
152 Internal registers of the SPU. The representation is an ASCII string
153 with the numeric value of the respective register. These
154 can be used in read/write mode for debugging, but normal operation of
155 programs should not rely on them because access to any of them except
156 npc requires an SPU context save and is therefore very inefficient.
157
158 The contents of these files are:
159
160 npc Next Program Counter
161
162 decr SPU Decrementer
163
164 decr_status Decrementer Status
165
166 spu_tag_mask MFC tag mask for SPU DMA
167
168 event_mask Event mask for SPU interrupts
169
170 srr0 Interrupt Return address register
171
172
173 The possible operations on an open npc, decr, decr_status,
174 spu_tag_mask, event_mask or srr0 file are:
175
176 read(2)
177 When the count supplied to the read call is shorter than the
178 required length for the pointer value plus a newline character,
179 subsequent reads from the same file descriptor will result in
180 completing the string, regardless of changes to the register by
181 a running SPU task. When a complete string has been read, all
182 subsequent read operations will return zero bytes and a new file
183 descriptor needs to be opened to read the value again.
184
185 write(2)
186 A write operation on the file results in setting the register to
187 the value given in the string. The string is parsed from the
188 beginning to the first non-numeric character or the end of the
189 buffer. Subsequent writes to the same file descriptor overwrite
190 the previous setting.
191
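The following sketch shows the ASCII read/write convention on the npc file; the context path is an example only.

/* Sketch: set and read back the next program counter as ASCII text. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	int fd = open("/spu/myctx/npc", O_RDWR);      /* example path */
	if (fd < 0) { perror("open"); return 1; }

	write(fd, "0\n", 2);                          /* start address 0 */

	/* A fresh descriptor is needed to read the value again later. */
	ssize_t n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("npc = %s", buf);
	}
	return 0;
}
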
192
193 /fpcr
194 This file gives access to the Floating Point Status and Control Regis-
195 ter as a four byte long file. The operations on the fpcr file are:
196
197 read(2)
198 If a count smaller than four is requested, read returns -1 and
199 sets errno to EINVAL. Otherwise, a four byte value is placed in
200 the data buffer, containing the current value of the fpcr regis-
201 ter.
202
203 write(2)
204 If a count smaller than four is requested, write returns -1 and
205 sets errno to EINVAL. Otherwise, a four byte value is copied
206 from the data buffer, updating the value of the fpcr register.
207
208
209 /signal1
210 /signal2
211 The two signal notification channels of an SPU. These are read-write
212 files that operate on a 32 bit word. Writing to one of these files
213 triggers an interrupt on the SPU. The value written to the signal
214 files can be read from the SPU through a channel read or from host user
215 space through the file. After the value has been read by the SPU, it
216 is reset to zero. The possible operations on an open signal1 or sig-
217 nal2 file are:
218
219 read(2)
220 If a count smaller than four is requested, read returns -1 and
221 sets errno to EINVAL. Otherwise, a four byte value is placed in
222 the data buffer, containing the current value of the specified
223 signal notification register.
224
225 write(2)
226 If a count smaller than four is requested, write returns -1 and
227 sets errno to EINVAL. Otherwise, a four byte value is copied
228 from the data buffer, updating the value of the specified signal
229 notification register. The signal notification register will
230 either be replaced with the input data or will be updated to the
231 bitwise OR of the old value and the input data, depending on the
232 contents of the signal1_type or signal2_type file, respec-
233 tively.
234
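An illustrative sketch of raising a signal notification from the host follows; the path and bit value are examples.

/* Sketch: raise signal notification 1 on the SPU with a single bit. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t bit = 1u << 0;                       /* example value */
	int fd = open("/spu/myctx/signal1", O_WRONLY);  /* example path */
	if (fd < 0) { perror("open"); return 1; }

	/* Whether this replaces or ORs the register depends on signal1_type. */
	if (write(fd, &bit, 4) != 4)
		perror("write");
	return 0;
}
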
235
236 /signal1_type
237 /signal2_type
238 These two files change the behavior of the signal1 and signal2 notifi-
239 cation files. They contain a numerical ASCII string which is read as
240 either "1" or "0". In mode 0 (overwrite), the hardware replaces the
241 contents of the signal channel with the data that is written to it. In
242 mode 1 (logical OR), the hardware accumulates the bits that are subse-
243 quently written to it. The possible operations on an open signal1_type
244 or signal2_type file are:
245
246 read(2)
247 When the count supplied to the read call is shorter than the
248 required length for the digit plus a newline character, subse-
249 quent reads from the same file descriptor will result in com-
250 pleting the string. When a complete string has been read, all
251 subsequent read operations will return zero bytes and a new file
252 descriptor needs to be opened to read the value again.
253
254 write(2)
255 A write operation on the file results in setting the register to
256 the value given in the string. The string is parsed from the
257 beginning to the first non-numeric character or the end of the
258 buffer. Subsequent writes to the same file descriptor overwrite
259 the previous setting.
260
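A short sketch of switching the signal mode (path again an example):

/* Sketch: switch signal1 into logical-OR (accumulate) mode. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/spu/myctx/signal1_type", O_WRONLY);  /* example path */
	if (fd < 0) { perror("open"); return 1; }
	write(fd, "1\n", 2);                                  /* mode 1 = OR */
	return 0;
}
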
261
262EXAMPLES
263 /etc/fstab entry
264 none /spu spufs gid=spu 0 0
265
266
267AUTHORS
268 Arnd Bergmann <arndb@de.ibm.com>, Mark Nutter <mnutter@us.ibm.com>,
269 Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
270
271SEE ALSO
272 capabilities(7), close(2), spu_create(2), spu_run(2), spufs(7)
273
274
275
276Linux 2005-09-28 SPUFS(2)
277
278------------------------------------------------------------------------------
279
280SPU_RUN(2) Linux Programmer's Manual SPU_RUN(2)
281
282
283
284NAME
285 spu_run - execute an spu context
286
287
288SYNOPSIS
289 #include <sys/spu.h>
290
291 int spu_run(int fd, unsigned int *npc, unsigned int *event);
292
293DESCRIPTION
294 The spu_run system call is used on PowerPC machines that implement the
295 Cell Broadband Engine Architecture in order to access Synergistic Pro-
296 cessor Units (SPUs). It uses the fd that was returned from spu_cre-
297 ate(2) to address a specific SPU context. When the context gets sched-
298 uled to a physical SPU, it starts execution at the instruction pointer
299 passed in npc.
300
301 Execution of SPU code happens synchronously, meaning that spu_run does
302 not return while the SPU is still running. If there is a need to exe-
303 cute SPU code in parallel with other code on either the main CPU or
304 other SPUs, you need to create a new thread of execution first, e.g.
305 using the pthread_create(3) call.
306
307 When spu_run returns, the current value of the SPU instruction pointer
308 is written back to npc, so you can call spu_run again without updating
309 the pointers.
310
311 event can be a NULL pointer or point to an extended status code that
312 gets filled when spu_run returns. It can be one of the following con-
313 stants:
314
315 SPE_EVENT_DMA_ALIGNMENT
316 A DMA alignment error
317
318 SPE_EVENT_SPE_DATA_SEGMENT
319 A DMA segmentation error
320
321 SPE_EVENT_SPE_DATA_STORAGE
322 A DMA storage error
323
324 If NULL is passed as the event argument, these errors will result in a
325 signal delivered to the calling process.
326
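The sketch below shows the typical pattern of running a context from its own thread. It is illustrative only: the __NR_spu_run syscall number and the context descriptor setup are assumptions, and the recommended way to reach this call is through the libraries mentioned in NOTES rather than the raw system call.

/* Sketch: run a context in its own thread so the PPE can keep working. */
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int ctx_fd;                      /* assumed to come from spu_create(2) */

static void *runner(void *arg)
{
	unsigned int npc = 0, event = 0;

	/* Blocks until the SPU program stops or faults. */
	long status = syscall(__NR_spu_run, ctx_fd, &npc, &event);
	printf("spu_status=0x%lx npc=0x%x event=0x%x\n", status, npc, event);
	return NULL;
}

int main(void)
{
	pthread_t t;
	/* ctx_fd would be set up with spu_create(2) before this point. */
	pthread_create(&t, NULL, runner, NULL);
	pthread_join(t, NULL);
	return 0;
}
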
327RETURN VALUE
328 spu_run returns the value of the spu_status register or -1 to indicate
329 an error and set errno to one of the error codes listed below. The
330 spu_status register value contains a bit mask of status codes and
331 optionally a 14 bit code returned from the stop-and-signal instruction
332 on the SPU. The bit masks for the status codes are:
333
334 0x02 SPU was stopped by stop-and-signal.
335
336 0x04 SPU was stopped by halt.
337
338 0x08 SPU is waiting for a channel.
339
340 0x10 SPU is in single-step mode.
341
342 0x20 SPU has tried to execute an invalid instruction.
343
344 0x40 SPU has tried to access an invalid channel.
345
346 0x3fff0000
347 The bits masked with this value contain the code returned from
348 stop-and-signal.
349
350 Unless spu_run returns an error code, at least one of the lower eight
351 bits is always set.
352
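A small sketch of decoding the return value, using the bit masks listed above:

/* Sketch: interpret the value returned by spu_run. */
#include <stdio.h>

static void decode_spu_status(long status)
{
	if (status & 0x02)      /* stopped by stop-and-signal */
		printf("stop code 0x%lx\n", (status & 0x3fff0000) >> 16);
	if (status & 0x04)
		printf("stopped by halt\n");
	if (status & 0x20)
		printf("invalid instruction\n");
}
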
353ERRORS
354 EAGAIN or EWOULDBLOCK
355 fd is in non-blocking mode and spu_run would block.
356
357 EBADF fd is not a valid file descriptor.
358
359 EFAULT npc is not a valid pointer or status is neither NULL nor a valid
360 pointer.
361
362 EINTR A signal occurred while spu_run was in progress. The npc value
363 has been updated to the new program counter value if necessary.
364
365 EINVAL fd is not a file descriptor returned from spu_create(2).
366
367 ENOMEM Insufficient memory was available to handle a page fault result-
368 ing from an MFC direct memory access.
369
370 ENOSYS The functionality is not provided by the current system, because
371 either the hardware does not provide SPUs or the spufs module is
372 not loaded.
373
374
375NOTES
376 spu_run is meant to be used from libraries that implement a more
377 abstract interface to SPUs, not to be used from regular applications.
378 See http://www.bsc.es/projects/deepcomputing/linuxoncell/ for the rec-
379 ommended libraries.
380
381
382CONFORMING TO
383 This call is Linux specific and only implemented by the ppc64 architec-
384 ture. Programs using this system call are not portable.
385
386
387BUGS
388 The code does not yet fully implement all features outlined here.
389
390
391AUTHOR
392 Arnd Bergmann <arndb@de.ibm.com>
393
394SEE ALSO
395 capabilities(7), close(2), spu_create(2), spufs(7)
396
397
398
399Linux 2005-09-28 SPU_RUN(2)
400
401------------------------------------------------------------------------------
402
403SPU_CREATE(2) Linux Programmer's Manual SPU_CREATE(2)
404
405
406
407NAME
408 spu_create - create a new spu context
409
410
411SYNOPSIS
412 #include <sys/types.h>
413 #include <sys/spu.h>
414
415 int spu_create(const char *pathname, int flags, mode_t mode);
416
417DESCRIPTION
418 The spu_create system call is used on PowerPC machines that implement
419 the Cell Broadband Engine Architecture in order to access Synergistic
420 Processor Units (SPUs). It creates a new logical context for an SPU in
421 pathname and returns a handle associated with it. pathname must
422 point to a non-existing directory in the mount point of the SPU file
423 system (spufs). When spu_create is successful, a directory gets cre-
424 ated on pathname and it is populated with files.
425
426 The returned file handle can only be passed to spu_run(2) or closed;
427 other operations are not defined on it. When it is closed, all associ-
428 ated directory entries in spufs are removed. When the last file handle
429 pointing either inside of the context directory or to this file
430 descriptor is closed, the logical SPU context is destroyed.
431
432 The parameter flags can be zero or any bitwise or'd combination of the
433 following constants:
434
435 SPU_RAWIO
436 Allow mapping of some of the hardware registers of the SPU into
437 user space. This flag requires the CAP_SYS_RAWIO capability, see
438 capabilities(7).
439
440 The mode parameter specifies the permissions used for creating the new
441 directory in spufs. mode is modified with the user's umask(2) value
442 and then used for both the directory and the files contained in it. The
443 file permissions mask out some more bits of mode because they typically
444 support only read or write access. See stat(2) for a full list of the
445 possible mode values.
446
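A minimal sketch of creating and tearing down a context follows. The __NR_spu_create syscall number and the /spu/myctx path are assumptions for the example.

/* Sketch: create a context directory in spufs and destroy it again. */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fd = syscall(__NR_spu_create, "/spu/myctx", 0, 0755);
	if (fd < 0) {
		perror("spu_create");
		return 1;
	}
	/* Closing the descriptor (and any files opened inside the
	 * directory) destroys the logical SPU context again. */
	close(fd);
	return 0;
}
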
447
448RETURN VALUE
449 spu_create returns a new file descriptor. It may return -1 to indicate
450 an error condition and set errno to one of the error codes listed
451 below.
452
453
454ERRORS
455 EACCES
456 The current user does not have write access on the spufs mount
457 point.
458
459 EEXIST An SPU context already exists at the given path name.
460
461 EFAULT pathname is not a valid string pointer in the current address
462 space.
463
464 EINVAL pathname is not a directory in the spufs mount point.
465
466 ELOOP Too many symlinks were found while resolving pathname.
467
468 EMFILE The process has reached its maximum open file limit.
469
470 ENAMETOOLONG
471 pathname was too long.
472
473 ENFILE The system has reached the global open file limit.
474
475 ENOENT Part of pathname could not be resolved.
476
477 ENOMEM The kernel could not allocate all resources required.
478
479 ENOSPC There are not enough SPU resources available to create a new
480 context or the user specific limit for the number of SPU con-
481 texts has been reached.
482
483 ENOSYS The functionality is not provided by the current system, because
484 either the hardware does not provide SPUs or the spufs module is
485 not loaded.
486
487 ENOTDIR
488 A part of pathname is not a directory.
489
490
491
492NOTES
493 spu_create is meant to be used from libraries that implement a more
494 abstract interface to SPUs, not to be used from regular applications.
495 See http://www.bsc.es/projects/deepcomputing/linuxoncell/ for the rec-
496 ommended libraries.
497
498
499FILES
500 pathname must point to a location beneath the mount point of spufs. By
501 convention, it gets mounted in /spu.
502
503
504CONFORMING TO
505 This call is Linux specific and only implemented by the ppc64 architec-
506 ture. Programs using this system call are not portable.
507
508
509BUGS
510 The code does not yet fully implement all features outlined here.
511
512
513AUTHOR
514 Arnd Bergmann <arndb@de.ibm.com>
515
516SEE ALSO
517 capabilities(7), close(2), spu_run(2), spufs(7)
518
519
520
521Linux 2005-09-28 SPU_CREATE(2)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4d71aa3ecbb5..39ca7b9da369 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -482,6 +482,7 @@ source arch/powerpc/platforms/embedded6xx/Kconfig
482source arch/powerpc/platforms/4xx/Kconfig 482source arch/powerpc/platforms/4xx/Kconfig
483source arch/powerpc/platforms/85xx/Kconfig 483source arch/powerpc/platforms/85xx/Kconfig
484source arch/powerpc/platforms/8xx/Kconfig 484source arch/powerpc/platforms/8xx/Kconfig
485source arch/powerpc/platforms/cell/Kconfig
485 486
486menu "Kernel options" 487menu "Kernel options"
487 488
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 4bb3650420b4..989f6286991a 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -319,3 +319,5 @@ COMPAT_SYS(ioprio_get)
319SYSCALL(inotify_init) 319SYSCALL(inotify_init)
320SYSCALL(inotify_add_watch) 320SYSCALL(inotify_add_watch)
321SYSCALL(inotify_rm_watch) 321SYSCALL(inotify_rm_watch)
322SYSCALL(spu_run)
323SYSCALL(spu_create)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a606504678bd..846a1894cf95 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -644,6 +644,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
644 DBG_LOW(" -> rc=%d\n", rc); 644 DBG_LOW(" -> rc=%d\n", rc);
645 return rc; 645 return rc;
646} 646}
647EXPORT_SYMBOL_GPL(hash_page);
647 648
648void hash_preload(struct mm_struct *mm, unsigned long ea, 649void hash_preload(struct mm_struct *mm, unsigned long ea,
649 unsigned long access, unsigned long trap) 650 unsigned long access, unsigned long trap)
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
new file mode 100644
index 000000000000..3157071e241c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -0,0 +1,13 @@
1menu "Cell Broadband Engine options"
2 depends on PPC_CELL
3
4config SPU_FS
5 tristate "SPU file system"
6 default m
7 depends on PPC_CELL
8 help
9 The SPU file system is used to access Synergistic Processing
10 Units on machines implementing the Broadband Processor
11 Architecture.
12
13endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 55e094b96bc0..74616cf13af9 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,2 +1,5 @@
1obj-y += interrupt.o iommu.o setup.o spider-pic.o 1obj-y += interrupt.o iommu.o setup.o spider-pic.o
2obj-$(CONFIG_SMP) += smp.o 2obj-$(CONFIG_SMP) += smp.o
3obj-$(CONFIG_SPU_FS) += spufs/ spu_base.o
4builtin-spufs-$(CONFIG_SPU_FS) += spu_syscalls.o
5obj-y += $(builtin-spufs-m)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
new file mode 100644
index 000000000000..9e9096590a07
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -0,0 +1,740 @@
1/*
2 * Low-level SPU handling
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#define DEBUG 1
24
25#include <linux/interrupt.h>
26#include <linux/list.h>
27#include <linux/module.h>
28#include <linux/poll.h>
29#include <linux/ptrace.h>
30#include <linux/slab.h>
31#include <linux/wait.h>
32
33#include <asm/io.h>
34#include <asm/prom.h>
35#include <asm/semaphore.h>
36#include <asm/spu.h>
37#include <asm/mmu_context.h>
38
39#include "interrupt.h"
40
41static int __spu_trap_invalid_dma(struct spu *spu)
42{
43 pr_debug("%s\n", __FUNCTION__);
44 force_sig(SIGBUS, /* info, */ current);
45 return 0;
46}
47
48static int __spu_trap_dma_align(struct spu *spu)
49{
50 pr_debug("%s\n", __FUNCTION__);
51 force_sig(SIGBUS, /* info, */ current);
52 return 0;
53}
54
55static int __spu_trap_error(struct spu *spu)
56{
57 pr_debug("%s\n", __FUNCTION__);
58 force_sig(SIGILL, /* info, */ current);
59 return 0;
60}
61
62static void spu_restart_dma(struct spu *spu)
63{
64 struct spu_priv2 __iomem *priv2 = spu->priv2;
65 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
66}
67
68static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
69{
70 struct spu_priv2 __iomem *priv2;
71 struct mm_struct *mm;
72
73 pr_debug("%s\n", __FUNCTION__);
74
75 if (REGION_ID(ea) != USER_REGION_ID) {
76 pr_debug("invalid region access at %016lx\n", ea);
77 return 1;
78 }
79
80 priv2 = spu->priv2;
81 mm = spu->mm;
82
83 if (spu->slb_replace >= 8)
84 spu->slb_replace = 0;
85
86 out_be64(&priv2->slb_index_W, spu->slb_replace);
87 out_be64(&priv2->slb_vsid_RW,
88 (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)
89 | SLB_VSID_USER);
90 out_be64(&priv2->slb_esid_RW, (ea & ESID_MASK) | SLB_ESID_V);
91
92 spu_restart_dma(spu);
93
94 pr_debug("set slb %d context %lx, ea %016lx, vsid %016lx, esid %016lx\n",
95 spu->slb_replace, mm->context.id, ea,
96 (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)| SLB_VSID_USER,
97 (ea & ESID_MASK) | SLB_ESID_V);
98 return 0;
99}
100
101static int __spu_trap_data_map(struct spu *spu, unsigned long ea)
102{
103 unsigned long dsisr;
104 struct spu_priv1 __iomem *priv1;
105
106 pr_debug("%s\n", __FUNCTION__);
107 priv1 = spu->priv1;
108 dsisr = in_be64(&priv1->mfc_dsisr_RW);
109
110 wake_up(&spu->stop_wq);
111
112 return 0;
113}
114
115static int __spu_trap_mailbox(struct spu *spu)
116{
117 wake_up_all(&spu->ibox_wq);
118 kill_fasync(&spu->ibox_fasync, SIGIO, POLLIN);
119
120 /* atomically disable SPU mailbox interrupts */
121 spin_lock(&spu->register_lock);
122 out_be64(&spu->priv1->int_mask_class2_RW,
123 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
124 spin_unlock(&spu->register_lock);
125 return 0;
126}
127
128static int __spu_trap_stop(struct spu *spu)
129{
130 pr_debug("%s\n", __FUNCTION__);
131 spu->stop_code = in_be32(&spu->problem->spu_status_R);
132 wake_up(&spu->stop_wq);
133 return 0;
134}
135
136static int __spu_trap_halt(struct spu *spu)
137{
138 pr_debug("%s\n", __FUNCTION__);
139 spu->stop_code = in_be32(&spu->problem->spu_status_R);
140 wake_up(&spu->stop_wq);
141 return 0;
142}
143
144static int __spu_trap_tag_group(struct spu *spu)
145{
146 pr_debug("%s\n", __FUNCTION__);
147 /* wake_up(&spu->dma_wq); */
148 return 0;
149}
150
151static int __spu_trap_spubox(struct spu *spu)
152{
153 wake_up_all(&spu->wbox_wq);
154 kill_fasync(&spu->wbox_fasync, SIGIO, POLLOUT);
155
156 /* atomically disable SPU mailbox interrupts */
157 spin_lock(&spu->register_lock);
158 out_be64(&spu->priv1->int_mask_class2_RW,
159 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
160 spin_unlock(&spu->register_lock);
161 return 0;
162}
163
164static irqreturn_t
165spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
166{
167 struct spu *spu;
168
169 spu = data;
170 spu->class_0_pending = 1;
171 wake_up(&spu->stop_wq);
172
173 return IRQ_HANDLED;
174}
175
176static int
177spu_irq_class_0_bottom(struct spu *spu)
178{
179 unsigned long stat;
180
181 spu->class_0_pending = 0;
182
183 stat = in_be64(&spu->priv1->int_stat_class0_RW);
184
185 if (stat & 1) /* invalid MFC DMA */
186 __spu_trap_invalid_dma(spu);
187
188 if (stat & 2) /* invalid DMA alignment */
189 __spu_trap_dma_align(spu);
190
191 if (stat & 4) /* error on SPU */
192 __spu_trap_error(spu);
193
194 out_be64(&spu->priv1->int_stat_class0_RW, stat);
195 return 0;
196}
197
198static irqreturn_t
199spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
200{
201 struct spu *spu;
202 unsigned long stat, dar;
203
204 spu = data;
205 stat = in_be64(&spu->priv1->int_stat_class1_RW);
206 dar = in_be64(&spu->priv1->mfc_dar_RW);
207
208 if (stat & 1) /* segment fault */
209 __spu_trap_data_seg(spu, dar);
210
211 if (stat & 2) { /* mapping fault */
212 __spu_trap_data_map(spu, dar);
213 }
214
215 if (stat & 4) /* ls compare & suspend on get */
216 ;
217
218 if (stat & 8) /* ls compare & suspend on put */
219 ;
220
221 out_be64(&spu->priv1->int_stat_class1_RW, stat);
222 return stat ? IRQ_HANDLED : IRQ_NONE;
223}
224
225static irqreturn_t
226spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
227{
228 struct spu *spu;
229 unsigned long stat;
230
231 spu = data;
232 stat = in_be64(&spu->priv1->int_stat_class2_RW);
233
234 pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
235 in_be64(&spu->priv1->int_mask_class2_RW));
236
237
238 if (stat & 1) /* PPC core mailbox */
239 __spu_trap_mailbox(spu);
240
241 if (stat & 2) /* SPU stop-and-signal */
242 __spu_trap_stop(spu);
243
244 if (stat & 4) /* SPU halted */
245 __spu_trap_halt(spu);
246
247 if (stat & 8) /* DMA tag group complete */
248 __spu_trap_tag_group(spu);
249
250 if (stat & 0x10) /* SPU mailbox threshold */
251 __spu_trap_spubox(spu);
252
253 out_be64(&spu->priv1->int_stat_class2_RW, stat);
254 return stat ? IRQ_HANDLED : IRQ_NONE;
255}
256
257static int
258spu_request_irqs(struct spu *spu)
259{
260 int ret;
261 int irq_base;
262
263 irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
264
265 snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
266 ret = request_irq(irq_base + spu->isrc,
267 spu_irq_class_0, 0, spu->irq_c0, spu);
268 if (ret)
269 goto out;
270 out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
271
272 snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
273 ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
274 spu_irq_class_1, 0, spu->irq_c1, spu);
275 if (ret)
276 goto out1;
277 out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
278
279 snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
280 ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
281 spu_irq_class_2, 0, spu->irq_c2, spu);
282 if (ret)
283 goto out2;
284 out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
285 goto out;
286
287out2:
288 free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
289out1:
290 free_irq(irq_base + spu->isrc, spu);
291out:
292 return ret;
293}
294
295static void
296spu_free_irqs(struct spu *spu)
297{
298 int irq_base;
299
300 irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
301
302 free_irq(irq_base + spu->isrc, spu);
303 free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
304 free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
305}
306
307static LIST_HEAD(spu_list);
308static DECLARE_MUTEX(spu_mutex);
309
310static void spu_init_channels(struct spu *spu)
311{
312 static const struct {
313 unsigned channel;
314 unsigned count;
315 } zero_list[] = {
316 { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
317 { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
318 }, count_list[] = {
319 { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
320 { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
321 { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
322 };
323 struct spu_priv2 *priv2;
324 int i;
325
326 priv2 = spu->priv2;
327
328 /* initialize all channel data to zero */
329 for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
330 int count;
331
332 out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
333 for (count = 0; count < zero_list[i].count; count++)
334 out_be64(&priv2->spu_chnldata_RW, 0);
335 }
336
337 /* initialize channel counts to meaningful values */
338 for (i = 0; i < ARRAY_SIZE(count_list); i++) {
339 out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
340 out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
341 }
342}
343
344static void spu_init_regs(struct spu *spu)
345{
346 out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
347 out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
348 out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
349}
350
351struct spu *spu_alloc(void)
352{
353 struct spu *spu;
354
355 down(&spu_mutex);
356 if (!list_empty(&spu_list)) {
357 spu = list_entry(spu_list.next, struct spu, list);
358 list_del_init(&spu->list);
359 pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
360 } else {
361 pr_debug("No SPU left\n");
362 spu = NULL;
363 }
364 up(&spu_mutex);
365
366 if (spu) {
367 spu_init_channels(spu);
368 spu_init_regs(spu);
369 }
370
371 return spu;
372}
373EXPORT_SYMBOL(spu_alloc);
374
375void spu_free(struct spu *spu)
376{
377 down(&spu_mutex);
378 spu->ibox_fasync = NULL;
379 spu->wbox_fasync = NULL;
380 list_add_tail(&spu->list, &spu_list);
381 up(&spu_mutex);
382}
383EXPORT_SYMBOL(spu_free);
384
385extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
386static int spu_handle_mm_fault(struct spu *spu)
387{
388 struct spu_priv1 __iomem *priv1;
389 struct mm_struct *mm = spu->mm;
390 struct vm_area_struct *vma;
391 u64 ea, dsisr, is_write;
392 int ret;
393
394 priv1 = spu->priv1;
395 ea = in_be64(&priv1->mfc_dar_RW);
396 dsisr = in_be64(&priv1->mfc_dsisr_RW);
397#if 0
398 if (!IS_VALID_EA(ea)) {
399 return -EFAULT;
400 }
401#endif /* XXX */
402 if (mm == NULL) {
403 return -EFAULT;
404 }
405 if (mm->pgd == NULL) {
406 return -EFAULT;
407 }
408
409 down_read(&mm->mmap_sem);
410 vma = find_vma(mm, ea);
411 if (!vma)
412 goto bad_area;
413 if (vma->vm_start <= ea)
414 goto good_area;
415 if (!(vma->vm_flags & VM_GROWSDOWN))
416 goto bad_area;
417#if 0
418 if (expand_stack(vma, ea))
419 goto bad_area;
420#endif /* XXX */
421good_area:
422 is_write = dsisr & MFC_DSISR_ACCESS_PUT;
423 if (is_write) {
424 if (!(vma->vm_flags & VM_WRITE))
425 goto bad_area;
426 } else {
427 if (dsisr & MFC_DSISR_ACCESS_DENIED)
428 goto bad_area;
429 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
430 goto bad_area;
431 }
432 ret = 0;
433 switch (handle_mm_fault(mm, vma, ea, is_write)) {
434 case VM_FAULT_MINOR:
435 current->min_flt++;
436 break;
437 case VM_FAULT_MAJOR:
438 current->maj_flt++;
439 break;
440 case VM_FAULT_SIGBUS:
441 ret = -EFAULT;
442 goto bad_area;
443 case VM_FAULT_OOM:
444 ret = -ENOMEM;
445 goto bad_area;
446 default:
447 BUG();
448 }
449 up_read(&mm->mmap_sem);
450 return ret;
451
452bad_area:
453 up_read(&mm->mmap_sem);
454 return -EFAULT;
455}
456
457static int spu_handle_pte_fault(struct spu *spu)
458{
459 struct spu_priv1 __iomem *priv1;
460 u64 ea, dsisr, access, error = 0UL;
461 int ret = 0;
462
463 priv1 = spu->priv1;
464 ea = in_be64(&priv1->mfc_dar_RW);
465 dsisr = in_be64(&priv1->mfc_dsisr_RW);
466 access = (_PAGE_PRESENT | _PAGE_USER);
467 if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
468 if (hash_page(ea, access, 0x300) != 0)
469 error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
470 }
471 if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
472 (dsisr & MFC_DSISR_ACCESS_DENIED)) {
473 if ((ret = spu_handle_mm_fault(spu)) != 0)
474 error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
475 else
476 error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
477 }
478 if (!error)
479 spu_restart_dma(spu);
480
481 return ret;
482}
483
484int spu_run(struct spu *spu)
485{
486 struct spu_problem __iomem *prob;
487 struct spu_priv1 __iomem *priv1;
488 struct spu_priv2 __iomem *priv2;
489 unsigned long status;
490 int ret;
491
492 prob = spu->problem;
493 priv1 = spu->priv1;
494 priv2 = spu->priv2;
495
496 /* Let SPU run. */
497 spu->mm = current->mm;
498 eieio();
499 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
500
501 do {
502 ret = wait_event_interruptible(spu->stop_wq,
503 (!((status = in_be32(&prob->spu_status_R)) & 0x1))
504 || (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
505 || spu->class_0_pending);
506
507 if (status & SPU_STATUS_STOPPED_BY_STOP)
508 ret = -EAGAIN;
509 else if (status & SPU_STATUS_STOPPED_BY_HALT)
510 ret = -EIO;
511 else if (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
512 ret = spu_handle_pte_fault(spu);
513
514 if (spu->class_0_pending)
515 spu_irq_class_0_bottom(spu);
516
517 if (!ret && signal_pending(current))
518 ret = -ERESTARTSYS;
519
520 } while (!ret);
521
522 /* Ensure SPU is stopped. */
523 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
524 eieio();
525 while (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)
526 cpu_relax();
527
528 out_be64(&priv2->slb_invalidate_all_W, 0);
529 out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
530 eieio();
531
532 spu->mm = NULL;
533
534 /* Check for SPU breakpoint. */
535 if (unlikely(current->ptrace & PT_PTRACED)) {
536 status = in_be32(&prob->spu_status_R);
537
538 if ((status & SPU_STATUS_STOPPED_BY_STOP)
539 && status >> SPU_STOP_STATUS_SHIFT == 0x3fff) {
540 force_sig(SIGTRAP, current);
541 ret = -ERESTARTSYS;
542 }
543 }
544
545 return ret;
546}
547EXPORT_SYMBOL(spu_run);
548
549static void __iomem * __init map_spe_prop(struct device_node *n,
550 const char *name)
551{
552 struct address_prop {
553 unsigned long address;
554 unsigned int len;
555 } __attribute__((packed)) *prop;
556
557 void *p;
558 int proplen;
559
560 p = get_property(n, name, &proplen);
561 if (proplen != sizeof (struct address_prop))
562 return NULL;
563
564 prop = p;
565
566 return ioremap(prop->address, prop->len);
567}
568
569static void spu_unmap(struct spu *spu)
570{
571 iounmap(spu->priv2);
572 iounmap(spu->priv1);
573 iounmap(spu->problem);
574 iounmap((u8 __iomem *)spu->local_store);
575}
576
577static int __init spu_map_device(struct spu *spu, struct device_node *spe)
578{
579 char *prop;
580 int ret;
581
582 ret = -ENODEV;
583 prop = get_property(spe, "isrc", NULL);
584 if (!prop)
585 goto out;
586 spu->isrc = *(unsigned int *)prop;
587
588 spu->name = get_property(spe, "name", NULL);
589 if (!spu->name)
590 goto out;
591
592 prop = get_property(spe, "local-store", NULL);
593 if (!prop)
594 goto out;
595 spu->local_store_phys = *(unsigned long *)prop;
596
597 /* we use local store as ram, not io memory */
598 spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
599 if (!spu->local_store)
600 goto out;
601
602 spu->problem= map_spe_prop(spe, "problem");
603 if (!spu->problem)
604 goto out_unmap;
605
606 spu->priv1= map_spe_prop(spe, "priv1");
607 if (!spu->priv1)
608 goto out_unmap;
609
610 spu->priv2= map_spe_prop(spe, "priv2");
611 if (!spu->priv2)
612 goto out_unmap;
613 ret = 0;
614 goto out;
615
616out_unmap:
617 spu_unmap(spu);
618out:
619 return ret;
620}
621
622static int __init find_spu_node_id(struct device_node *spe)
623{
624 unsigned int *id;
625 struct device_node *cpu;
626
627 cpu = spe->parent->parent;
628 id = (unsigned int *)get_property(cpu, "node-id", NULL);
629
630 return id ? *id : 0;
631}
632
633static int __init create_spu(struct device_node *spe)
634{
635 struct spu *spu;
636 int ret;
637 static int number;
638
639 ret = -ENOMEM;
640 spu = kmalloc(sizeof (*spu), GFP_KERNEL);
641 if (!spu)
642 goto out;
643
644 ret = spu_map_device(spu, spe);
645 if (ret)
646 goto out_free;
647
648 spu->node = find_spu_node_id(spe);
649 spu->stop_code = 0;
650 spu->slb_replace = 0;
651 spu->mm = NULL;
652 spu->class_0_pending = 0;
653 spin_lock_init(&spu->register_lock);
654
655 out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
656 out_be64(&spu->priv1->mfc_sr1_RW, 0x33);
657
658 init_waitqueue_head(&spu->stop_wq);
659 init_waitqueue_head(&spu->wbox_wq);
660 init_waitqueue_head(&spu->ibox_wq);
661
662 spu->ibox_fasync = NULL;
663 spu->wbox_fasync = NULL;
664
665 down(&spu_mutex);
666 spu->number = number++;
667 ret = spu_request_irqs(spu);
668 if (ret)
669 goto out_unmap;
670
671 list_add(&spu->list, &spu_list);
672 up(&spu_mutex);
673
674 pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
675 spu->name, spu->isrc, spu->local_store,
676 spu->problem, spu->priv1, spu->priv2, spu->number);
677 goto out;
678
679out_unmap:
680 up(&spu_mutex);
681 spu_unmap(spu);
682out_free:
683 kfree(spu);
684out:
685 return ret;
686}
687
688static void destroy_spu(struct spu *spu)
689{
690 list_del_init(&spu->list);
691
692 spu_free_irqs(spu);
693 spu_unmap(spu);
694 kfree(spu);
695}
696
697static void cleanup_spu_base(void)
698{
699 struct spu *spu, *tmp;
700 down(&spu_mutex);
701 list_for_each_entry_safe(spu, tmp, &spu_list, list)
702 destroy_spu(spu);
703 up(&spu_mutex);
704}
705module_exit(cleanup_spu_base);
706
707static int __init init_spu_base(void)
708{
709 struct device_node *node;
710 int ret;
711
712 ret = -ENODEV;
713 for (node = of_find_node_by_type(NULL, "spe");
714 node; node = of_find_node_by_type(node, "spe")) {
715 ret = create_spu(node);
716 if (ret) {
717 printk(KERN_WARNING "%s: Error initializing %s\n",
718 __FUNCTION__, node->name);
719 cleanup_spu_base();
720 break;
721 }
722 }
723 /* in some old firmware versions, the spe is called 'spc', so we
724 look for that as well */
725 for (node = of_find_node_by_type(NULL, "spc");
726 node; node = of_find_node_by_type(node, "spc")) {
727 ret = create_spu(node);
728 if (ret) {
729 printk(KERN_WARNING "%s: Error initializing %s\n",
730 __FUNCTION__, node->name);
731 cleanup_spu_base();
732 break;
733 }
734 }
735 return ret;
736}
737module_init(init_spu_base);
738
739MODULE_LICENSE("GPL");
740MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
new file mode 100644
index 000000000000..43e0b187ffde
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -0,0 +1,86 @@
1/*
2 * SPU file system -- system call stubs
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22#include <linux/file.h>
23#include <linux/module.h>
24#include <linux/syscalls.h>
25
26#include <asm/spu.h>
27
28struct spufs_calls spufs_calls = {
29 .owner = NULL,
30};
31
32/* These stub syscalls are needed to have the actual implementation
33 * within a loadable module. When spufs is built into the kernel,
34 * this file is not used and the syscalls directly enter the fs code */
35
36asmlinkage long sys_spu_create(const char __user *name,
37 unsigned int flags, mode_t mode)
38{
39 long ret;
40
41 ret = -ENOSYS;
42 if (try_module_get(spufs_calls.owner)) {
43 ret = spufs_calls.create_thread(name, flags, mode);
44 module_put(spufs_calls.owner);
45 }
46 return ret;
47}
48
49asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
50{
51 long ret;
52 struct file *filp;
53 int fput_needed;
54
55 ret = -ENOSYS;
56 if (try_module_get(spufs_calls.owner)) {
57 ret = -EBADF;
58 filp = fget_light(fd, &fput_needed);
59 if (filp) {
60 ret = spufs_calls.spu_run(filp, unpc, ustatus);
61 fput_light(filp, fput_needed);
62 }
63 module_put(spufs_calls.owner);
64 }
65 return ret;
66}
67
68int register_spu_syscalls(struct spufs_calls *calls)
69{
70 if (spufs_calls.owner)
71 return -EBUSY;
72
73 spufs_calls.create_thread = calls->create_thread;
74 spufs_calls.spu_run = calls->spu_run;
75 smp_mb();
76 spufs_calls.owner = calls->owner;
77 return 0;
78}
79EXPORT_SYMBOL_GPL(register_spu_syscalls);
80
81void unregister_spu_syscalls(struct spufs_calls *calls)
82{
83 BUG_ON(spufs_calls.owner != calls->owner);
84 spufs_calls.owner = NULL;
85}
86EXPORT_SYMBOL_GPL(unregister_spu_syscalls);
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
new file mode 100644
index 000000000000..6f496e37bcb7
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_SPU_FS) += spufs.o
2
3spufs-y += inode.o file.o context.o syscalls.o
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
new file mode 100644
index 000000000000..a69b85e2778a
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -0,0 +1,67 @@
1/*
2 * SPU file system -- SPU context management
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/slab.h>
24#include <asm/spu.h>
25#include "spufs.h"
26
27struct spu_context *alloc_spu_context(void)
28{
29 struct spu_context *ctx;
30 ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
31 if (!ctx)
32 goto out;
33 ctx->spu = spu_alloc();
34 if (!ctx->spu)
35 goto out_free;
36 init_rwsem(&ctx->backing_sema);
37 spin_lock_init(&ctx->mmio_lock);
38 kref_init(&ctx->kref);
39 goto out;
40out_free:
41 kfree(ctx);
42 ctx = NULL;
43out:
44 return ctx;
45}
46
47void destroy_spu_context(struct kref *kref)
48{
49 struct spu_context *ctx;
50 ctx = container_of(kref, struct spu_context, kref);
51 if (ctx->spu)
52 spu_free(ctx->spu);
53 kfree(ctx);
54}
55
56struct spu_context * get_spu_context(struct spu_context *ctx)
57{
58 kref_get(&ctx->kref);
59 return ctx;
60}
61
62int put_spu_context(struct spu_context *ctx)
63{
64 return kref_put(&ctx->kref, &destroy_spu_context);
65}
66
67
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
new file mode 100644
index 000000000000..c1e643310494
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -0,0 +1,596 @@
1/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/fs.h>
24#include <linux/ioctl.h>
25#include <linux/module.h>
26#include <linux/poll.h>
27
28#include <asm/io.h>
29#include <asm/semaphore.h>
30#include <asm/spu.h>
31#include <asm/uaccess.h>
32
33#include "spufs.h"
34
35static int
36spufs_mem_open(struct inode *inode, struct file *file)
37{
38 struct spufs_inode_info *i = SPUFS_I(inode);
39 file->private_data = i->i_ctx;
40 return 0;
41}
42
43static ssize_t
44spufs_mem_read(struct file *file, char __user *buffer,
45 size_t size, loff_t *pos)
46{
47 struct spu *spu;
48 struct spu_context *ctx;
49 int ret;
50
51 ctx = file->private_data;
52 spu = ctx->spu;
53
54 down_read(&ctx->backing_sema);
55 if (spu->number & 0/*1*/) {
56 ret = generic_file_read(file, buffer, size, pos);
57 goto out;
58 }
59
60 ret = simple_read_from_buffer(buffer, size, pos,
61 spu->local_store, LS_SIZE);
62out:
63 up_read(&ctx->backing_sema);
64 return ret;
65}
66
67static ssize_t
68spufs_mem_write(struct file *file, const char __user *buffer,
69 size_t size, loff_t *pos)
70{
71 struct spu_context *ctx = file->private_data;
72 struct spu *spu = ctx->spu;
73
74 if (spu->number & 0) //1)
75 return generic_file_write(file, buffer, size, pos);
76
77 size = min_t(ssize_t, LS_SIZE - *pos, size);
78 if (size <= 0)
79 return -EFBIG;
80 *pos += size;
81 return copy_from_user(spu->local_store + *pos - size,
82 buffer, size) ? -EFAULT : size;
83}
84
85static int
86spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
87{
88 struct spu_context *ctx = file->private_data;
89 struct spu *spu = ctx->spu;
90 unsigned long pfn;
91
92 if (spu->number & 0) //1)
93 return generic_file_mmap(file, vma);
94
95 vma->vm_flags |= VM_RESERVED;
96 vma->vm_page_prot = __pgprot(pgprot_val (vma->vm_page_prot)
97 | _PAGE_NO_CACHE);
98 pfn = spu->local_store_phys >> PAGE_SHIFT;
99 /*
100 * This will work for actual SPUs, but not for vmalloc memory:
101 */
102 if (remap_pfn_range(vma, vma->vm_start, pfn,
103 vma->vm_end-vma->vm_start, vma->vm_page_prot))
104 return -EAGAIN;
105 return 0;
106}
107
108static struct file_operations spufs_mem_fops = {
109 .open = spufs_mem_open,
110 .read = spufs_mem_read,
111 .write = spufs_mem_write,
112 .mmap = spufs_mem_mmap,
113 .llseek = generic_file_llseek,
114};
115
116/* generic open function for all pipe-like files */
117static int spufs_pipe_open(struct inode *inode, struct file *file)
118{
119 struct spufs_inode_info *i = SPUFS_I(inode);
120 file->private_data = i->i_ctx;
121
122 return nonseekable_open(inode, file);
123}
124
125static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
126 size_t len, loff_t *pos)
127{
128 struct spu_context *ctx;
129 struct spu_problem __iomem *prob;
130 u32 mbox_stat;
131 u32 mbox_data;
132
133 if (len < 4)
134 return -EINVAL;
135
136 ctx = file->private_data;
137 prob = ctx->spu->problem;
138 mbox_stat = in_be32(&prob->mb_stat_R);
139 if (!(mbox_stat & 0x0000ff))
140 return -EAGAIN;
141
142 mbox_data = in_be32(&prob->pu_mb_R);
143
144 if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
145 return -EFAULT;
146
147 return 4;
148}
149
150static struct file_operations spufs_mbox_fops = {
151 .open = spufs_pipe_open,
152 .read = spufs_mbox_read,
153};
154
155static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
156 size_t len, loff_t *pos)
157{
158 struct spu_context *ctx;
159 u32 mbox_stat;
160
161 if (len < 4)
162 return -EINVAL;
163
164 ctx = file->private_data;
165 mbox_stat = in_be32(&ctx->spu->problem->mb_stat_R) & 0xff;
166
167 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
168 return -EFAULT;
169
170 return 4;
171}
172
173static struct file_operations spufs_mbox_stat_fops = {
174 .open = spufs_pipe_open,
175 .read = spufs_mbox_stat_read,
176};
177
178/* low-level ibox access function */
179size_t spu_ibox_read(struct spu *spu, u32 *data)
180{
181 int ret;
182
183 spin_lock_irq(&spu->register_lock);
184
185 if (in_be32(&spu->problem->mb_stat_R) & 0xff0000) {
186 /* read the first available word */
187 *data = in_be64(&spu->priv2->puint_mb_R);
188 ret = 4;
189 } else {
190 /* make sure we get woken up by the interrupt */
191 out_be64(&spu->priv1->int_mask_class2_RW,
192 in_be64(&spu->priv1->int_mask_class2_RW) | 0x1);
193 ret = 0;
194 }
195
196 spin_unlock_irq(&spu->register_lock);
197 return ret;
198}
199EXPORT_SYMBOL(spu_ibox_read);
200
201static int spufs_ibox_fasync(int fd, struct file *file, int on)
202{
203 struct spu_context *ctx;
204 ctx = file->private_data;
205 return fasync_helper(fd, file, on, &ctx->spu->ibox_fasync);
206}
207
208static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
209 size_t len, loff_t *pos)
210{
211 struct spu_context *ctx;
212 u32 ibox_data;
213 ssize_t ret;
214
215 if (len < 4)
216 return -EINVAL;
217
218 ctx = file->private_data;
219
220 ret = 0;
221 if (file->f_flags & O_NONBLOCK) {
222 if (!spu_ibox_read(ctx->spu, &ibox_data))
223 ret = -EAGAIN;
224 } else {
225 ret = wait_event_interruptible(ctx->spu->ibox_wq,
226 spu_ibox_read(ctx->spu, &ibox_data));
227 }
228
229 if (ret)
230 return ret;
231
232 ret = 4;
233 if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
234 ret = -EFAULT;
235
236 return ret;
237}
238
239static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
240{
241 struct spu_context *ctx;
242 struct spu_problem __iomem *prob;
243 u32 mbox_stat;
244 unsigned int mask;
245
246 ctx = file->private_data;
247 prob = ctx->spu->problem;
248 mbox_stat = in_be32(&prob->mb_stat_R);
249
250 poll_wait(file, &ctx->spu->ibox_wq, wait);
251
252 mask = 0;
253 if (mbox_stat & 0xff0000)
254 mask |= POLLIN | POLLRDNORM;
255
256 return mask;
257}
258
259static struct file_operations spufs_ibox_fops = {
260 .open = spufs_pipe_open,
261 .read = spufs_ibox_read,
262 .poll = spufs_ibox_poll,
263 .fasync = spufs_ibox_fasync,
264};
265
266static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
267 size_t len, loff_t *pos)
268{
269 struct spu_context *ctx;
270 u32 ibox_stat;
271
272 if (len < 4)
273 return -EINVAL;
274
275 ctx = file->private_data;
276 ibox_stat = (in_be32(&ctx->spu->problem->mb_stat_R) >> 16) & 0xff;
277
278 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
279 return -EFAULT;
280
281 return 4;
282}
283
284static struct file_operations spufs_ibox_stat_fops = {
285 .open = spufs_pipe_open,
286 .read = spufs_ibox_stat_read,
287};
288
289/* low-level mailbox write */
290size_t spu_wbox_write(struct spu *spu, u32 data)
291{
292 int ret;
293
294 spin_lock_irq(&spu->register_lock);
295
296 if (in_be32(&spu->problem->mb_stat_R) & 0x00ff00) {
297 /* we have space to write wbox_data to */
298 out_be32(&spu->problem->spu_mb_W, data);
299 ret = 4;
300 } else {
301 /* make sure we get woken up by the interrupt when space
302 becomes available */
303 out_be64(&spu->priv1->int_mask_class2_RW,
304 in_be64(&spu->priv1->int_mask_class2_RW) | 0x10);
305 ret = 0;
306 }
307
308 spin_unlock_irq(&spu->register_lock);
309 return ret;
310}
311EXPORT_SYMBOL(spu_wbox_write);
312
313static int spufs_wbox_fasync(int fd, struct file *file, int on)
314{
315 struct spu_context *ctx;
316 ctx = file->private_data;
317 return fasync_helper(fd, file, on, &ctx->spu->wbox_fasync);
318}
319
320static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
321 size_t len, loff_t *pos)
322{
323 struct spu_context *ctx;
324 u32 wbox_data;
325 int ret;
326
327 if (len < 4)
328 return -EINVAL;
329
330 ctx = file->private_data;
331
332 if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
333 return -EFAULT;
334
335 ret = 0;
336 if (file->f_flags & O_NONBLOCK) {
337 if (!spu_wbox_write(ctx->spu, wbox_data))
338 ret = -EAGAIN;
339 } else {
340 ret = wait_event_interruptible(ctx->spu->wbox_wq,
341 spu_wbox_write(ctx->spu, wbox_data));
342 }
343
344 return ret ? ret : sizeof wbox_data;
345}
346
347static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
348{
349 struct spu_context *ctx;
350 struct spu_problem __iomem *prob;
351 u32 mbox_stat;
352 unsigned int mask;
353
354 ctx = file->private_data;
355 prob = ctx->spu->problem;
356 mbox_stat = in_be32(&prob->mb_stat_R);
357
358 poll_wait(file, &ctx->spu->wbox_wq, wait);
359
360 mask = 0;
361 if (mbox_stat & 0x00ff00)
362 mask = POLLOUT | POLLWRNORM;
363
364 return mask;
365}
366
367static struct file_operations spufs_wbox_fops = {
368 .open = spufs_pipe_open,
369 .write = spufs_wbox_write,
370 .poll = spufs_wbox_poll,
371 .fasync = spufs_wbox_fasync,
372};
373
374static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
375 size_t len, loff_t *pos)
376{
377 struct spu_context *ctx;
378 u32 wbox_stat;
379
380 if (len < 4)
381 return -EINVAL;
382
383 ctx = file->private_data;
384 wbox_stat = (in_be32(&ctx->spu->problem->mb_stat_R) >> 8) & 0xff;
385
386 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
387 return -EFAULT;
388
389 return 4;
390}
391
392static struct file_operations spufs_wbox_stat_fops = {
393 .open = spufs_pipe_open,
394 .read = spufs_wbox_stat_read,
395};
396
397long spufs_run_spu(struct file *file, struct spu_context *ctx,
398 u32 *npc, u32 *status)
399{
400 struct spu_problem __iomem *prob;
401 int ret;
402
403 if (file->f_flags & O_NONBLOCK) {
404 ret = -EAGAIN;
405 if (!down_write_trylock(&ctx->backing_sema))
406 goto out;
407 } else {
408 down_write(&ctx->backing_sema);
409 }
410
411 prob = ctx->spu->problem;
412 out_be32(&prob->spu_npc_RW, *npc);
413
414 ret = spu_run(ctx->spu);
415
416 *status = in_be32(&prob->spu_status_R);
417 *npc = in_be32(&prob->spu_npc_RW);
418
419 up_write(&ctx->backing_sema);
420
421out:
422 return ret;
423}
424
425static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
426 size_t len, loff_t *pos)
427{
428 struct spu_context *ctx;
429 struct spu_problem *prob;
430 u32 data;
431
432 ctx = file->private_data;
433 prob = ctx->spu->problem;
434
435 if (len < 4)
436 return -EINVAL;
437
438 data = in_be32(&prob->signal_notify1);
439 if (copy_to_user(buf, &data, 4))
440 return -EFAULT;
441
442 return 4;
443}
444
445static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
446 size_t len, loff_t *pos)
447{
448 struct spu_context *ctx;
449 struct spu_problem *prob;
450 u32 data;
451
452 ctx = file->private_data;
453 prob = ctx->spu->problem;
454
455 if (len < 4)
456 return -EINVAL;
457
458 if (copy_from_user(&data, buf, 4))
459 return -EFAULT;
460
461 out_be32(&prob->signal_notify1, data);
462
463 return 4;
464}
465
466static struct file_operations spufs_signal1_fops = {
467 .open = spufs_pipe_open,
468 .read = spufs_signal1_read,
469 .write = spufs_signal1_write,
470};
471
472static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
473 size_t len, loff_t *pos)
474{
475 struct spu_context *ctx;
476 struct spu_problem *prob;
477 u32 data;
478
479 ctx = file->private_data;
480 prob = ctx->spu->problem;
481
482 if (len < 4)
483 return -EINVAL;
484
485 data = in_be32(&prob->signal_notify2);
486 if (copy_to_user(buf, &data, 4))
487 return -EFAULT;
488
489 return 4;
490}
491
492static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
493 size_t len, loff_t *pos)
494{
495 struct spu_context *ctx;
496 struct spu_problem *prob;
497 u32 data;
498
499 ctx = file->private_data;
500 prob = ctx->spu->problem;
501
502 if (len < 4)
503 return -EINVAL;
504
505 if (copy_from_user(&data, buf, 4))
506 return -EFAULT;
507
508 out_be32(&prob->signal_notify2, data);
509
510 return 4;
511}
512
513static struct file_operations spufs_signal2_fops = {
514 .open = spufs_pipe_open,
515 .read = spufs_signal2_read,
516 .write = spufs_signal2_write,
517};
518
519static void spufs_signal1_type_set(void *data, u64 val)
520{
521 struct spu_context *ctx = data;
522 struct spu_priv2 *priv2 = ctx->spu->priv2;
523 u64 tmp;
524
525 spin_lock_irq(&ctx->spu->register_lock);
526 tmp = in_be64(&priv2->spu_cfg_RW);
527 if (val)
528 tmp |= 1;
529 else
530 tmp &= ~1;
531 out_be64(&priv2->spu_cfg_RW, tmp);
532 spin_unlock_irq(&ctx->spu->register_lock);
533}
534
535static u64 spufs_signal1_type_get(void *data)
536{
537 struct spu_context *ctx = data;
538 return (in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0;
539}
540DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
541 spufs_signal1_type_set, "%llu");
542
543static void spufs_signal2_type_set(void *data, u64 val)
544{
545 struct spu_context *ctx = data;
546 struct spu_priv2 *priv2 = ctx->spu->priv2;
547 u64 tmp;
548
549 spin_lock_irq(&ctx->spu->register_lock);
550 tmp = in_be64(&priv2->spu_cfg_RW);
551 if (val)
552 tmp |= 2;
553 else
554 tmp &= ~2;
555 out_be64(&priv2->spu_cfg_RW, tmp);
556 spin_unlock_irq(&ctx->spu->register_lock);
557}
558
559static u64 spufs_signal2_type_get(void *data)
560{
561 struct spu_context *ctx = data;
562 return (in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0;
563}
564DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
565 spufs_signal2_type_set, "%llu");
566
567static void spufs_npc_set(void *data, u64 val)
568{
569 struct spu_context *ctx = data;
570 out_be32(&ctx->spu->problem->spu_npc_RW, val);
571}
572
573static u64 spufs_npc_get(void *data)
574{
575 struct spu_context *ctx = data;
576 u64 ret;
577 ret = in_be32(&ctx->spu->problem->spu_npc_RW);
578 return ret;
579}
580DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n");
581
582struct tree_descr spufs_dir_contents[] = {
583 { "mem", &spufs_mem_fops, 0666, },
584 { "mbox", &spufs_mbox_fops, 0444, },
585 { "ibox", &spufs_ibox_fops, 0444, },
586 { "wbox", &spufs_wbox_fops, 0222, },
587 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
588 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
589 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
590 { "signal1", &spufs_signal1_fops, 0666, },
591 { "signal2", &spufs_signal2_fops, 0666, },
592 { "signal1_type", &spufs_signal1_type, 0666, },
593 { "signal2_type", &spufs_signal2_type, 0666, },
594 { "npc", &spufs_npc_ops, 0666, },
595 {},
596};
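
The mbox, ibox and wbox entries listed above behave as ordinary file descriptors once a context directory exists: each read or write moves one 32-bit value, non-blocking access returns EAGAIN, and poll() reports POLLIN on ibox and POLLOUT on wbox. A minimal user-space sketch of draining the interrupt mailbox follows; the context path /spu/ctx is illustrative only and assumes a context created with spu_create beforehand.

    #include <fcntl.h>
    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* "ibox" inside a spufs context directory; the path is only an example */
            int fd = open("/spu/ctx/ibox", O_RDONLY);
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            uint32_t data;

            if (fd < 0)
                    return 1;
            /* wait until the SPU has written to its interrupt mailbox, then read one word */
            while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
                    if (read(fd, &data, 4) == 4)
                            printf("ibox: 0x%x\n", data);
            }
            close(fd);
            return 0;
    }
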
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
new file mode 100644
index 000000000000..f7aa0a6b1ce5
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -0,0 +1,470 @@
1/*
2 * SPU file system
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/file.h>
24#include <linux/fs.h>
25#include <linux/backing-dev.h>
26#include <linux/init.h>
27#include <linux/ioctl.h>
28#include <linux/module.h>
29#include <linux/namei.h>
30#include <linux/pagemap.h>
31#include <linux/poll.h>
32#include <linux/slab.h>
33#include <linux/parser.h>
34
35#include <asm/io.h>
36#include <asm/semaphore.h>
37#include <asm/spu.h>
38#include <asm/uaccess.h>
39
40#include "spufs.h"
41
42static kmem_cache_t *spufs_inode_cache;
43
44/* Information about the backing dev, same as ramfs */
45#if 0
46static struct backing_dev_info spufs_backing_dev_info = {
47 .ra_pages = 0, /* No readahead */
48 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK |
49 BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY | BDI_CAP_READ_MAP |
50 BDI_CAP_WRITE_MAP,
51};
52
53static struct address_space_operations spufs_aops = {
54 .readpage = simple_readpage,
55 .prepare_write = simple_prepare_write,
56 .commit_write = simple_commit_write,
57};
58#endif
59
60/* Inode operations */
61
62static struct inode *
63spufs_alloc_inode(struct super_block *sb)
64{
65 struct spufs_inode_info *ei;
66
67 ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
68 if (!ei)
69 return NULL;
70 return &ei->vfs_inode;
71}
72
73static void
74spufs_destroy_inode(struct inode *inode)
75{
76 kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
77}
78
79static void
80spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
81{
82 struct spufs_inode_info *ei = p;
83
84 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
85 SLAB_CTOR_CONSTRUCTOR) {
86 inode_init_once(&ei->vfs_inode);
87 }
88}
89
90static struct inode *
91spufs_new_inode(struct super_block *sb, int mode)
92{
93 struct inode *inode;
94
95 inode = new_inode(sb);
96 if (!inode)
97 goto out;
98
99 inode->i_mode = mode;
100 inode->i_uid = current->fsuid;
101 inode->i_gid = current->fsgid;
102 inode->i_blksize = PAGE_CACHE_SIZE;
103 inode->i_blocks = 0;
104 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
105out:
106 return inode;
107}
108
109static int
110spufs_setattr(struct dentry *dentry, struct iattr *attr)
111{
112 struct inode *inode = dentry->d_inode;
113
114/* dump_stack();
115 pr_debug("ia_size %lld, i_size:%lld\n", attr->ia_size, inode->i_size);
116*/
117 if ((attr->ia_valid & ATTR_SIZE) &&
118 (attr->ia_size != inode->i_size))
119 return -EINVAL;
120 return inode_setattr(inode, attr);
121}
122
123
124static int
125spufs_new_file(struct super_block *sb, struct dentry *dentry,
126 struct file_operations *fops, int mode,
127 struct spu_context *ctx)
128{
129 static struct inode_operations spufs_file_iops = {
130 .getattr = simple_getattr,
131 .setattr = spufs_setattr,
132 .unlink = simple_unlink,
133 };
134 struct inode *inode;
135 int ret;
136
137 ret = -ENOSPC;
138 inode = spufs_new_inode(sb, S_IFREG | mode);
139 if (!inode)
140 goto out;
141
142 ret = 0;
143 inode->i_op = &spufs_file_iops;
144 inode->i_fop = fops;
145 inode->u.generic_ip = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
146 d_add(dentry, inode);
147out:
148 return ret;
149}
150
151static void
152spufs_delete_inode(struct inode *inode)
153{
154 if (SPUFS_I(inode)->i_ctx)
155 put_spu_context(SPUFS_I(inode)->i_ctx);
156 clear_inode(inode);
157}
158
159static int
160spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
161 int mode, struct spu_context *ctx)
162{
163 struct dentry *dentry;
164 int ret;
165
166 while (files->name && files->name[0]) {
167 ret = -ENOMEM;
168 dentry = d_alloc_name(dir, files->name);
169 if (!dentry)
170 goto out;
171 ret = spufs_new_file(dir->d_sb, dentry, files->ops,
172 files->mode & mode, ctx);
173 if (ret)
174 goto out;
175 files++;
176 }
177 return 0;
178out:
179 // FIXME: remove all files that are left
180
181 return ret;
182}
183
184static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
185{
186 struct dentry *dentry;
187 int err;
188
189 spin_lock(&dcache_lock);
190 /* remove all entries */
191 err = 0;
192 list_for_each_entry(dentry, &dir_dentry->d_subdirs, d_child) {
193 if (d_unhashed(dentry) || !dentry->d_inode)
194 continue;
195 atomic_dec(&dentry->d_count);
196 spin_lock(&dentry->d_lock);
197 __d_drop(dentry);
198 spin_unlock(&dentry->d_lock);
199 }
200 spin_unlock(&dcache_lock);
201 if (!err) {
202 shrink_dcache_parent(dir_dentry);
203 err = simple_rmdir(root, dir_dentry);
204 }
205 return err;
206}
207
208static int spufs_dir_close(struct inode *inode, struct file *file)
209{
210 struct inode *dir;
211 struct dentry *dentry;
212 int ret;
213
214 dentry = file->f_dentry;
215 dir = dentry->d_parent->d_inode;
216 down(&dir->i_sem);
217 ret = spufs_rmdir(dir, file->f_dentry);
218 WARN_ON(ret);
219 up(&dir->i_sem);
220 return dcache_dir_close(inode, file);
221}
222
223struct inode_operations spufs_dir_inode_operations = {
224 .lookup = simple_lookup,
225};
226
227struct file_operations spufs_autodelete_dir_operations = {
228 .open = dcache_dir_open,
229 .release = spufs_dir_close,
230 .llseek = dcache_dir_lseek,
231 .read = generic_read_dir,
232 .readdir = dcache_readdir,
233 .fsync = simple_sync_file,
234};
235
236static int
237spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
238{
239 int ret;
240 struct inode *inode;
241 struct spu_context *ctx;
242
243 ret = -ENOSPC;
244 inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
245 if (!inode)
246 goto out;
247
248 if (dir->i_mode & S_ISGID) {
249 inode->i_gid = dir->i_gid;
250		inode->i_mode |= S_ISGID;
251 }
252 ctx = alloc_spu_context();
253 SPUFS_I(inode)->i_ctx = ctx;
254 if (!ctx)
255 goto out_iput;
256
257 inode->i_op = &spufs_dir_inode_operations;
258 inode->i_fop = &simple_dir_operations;
259 ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
260 if (ret)
261 goto out_free_ctx;
262
263 d_instantiate(dentry, inode);
264 dget(dentry);
265 dir->i_nlink++;
266 goto out;
267
268out_free_ctx:
269 put_spu_context(ctx);
270out_iput:
271 iput(inode);
272out:
273 return ret;
274}
275
276long
277spufs_create_thread(struct nameidata *nd, const char *name,
278 unsigned int flags, mode_t mode)
279{
280 struct dentry *dentry;
281 struct file *filp;
282 int ret;
283
284 /* need to be at the root of spufs */
285 ret = -EINVAL;
286 if (nd->dentry->d_sb->s_magic != SPUFS_MAGIC ||
287 nd->dentry != nd->dentry->d_sb->s_root)
288 goto out;
289
290 dentry = lookup_create(nd, 1);
291 ret = PTR_ERR(dentry);
292 if (IS_ERR(dentry))
293 goto out_dir;
294
295 ret = -EEXIST;
296 if (dentry->d_inode)
297 goto out_dput;
298
299 mode &= ~current->fs->umask;
300 ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO);
301 if (ret)
302 goto out_dput;
303
304 ret = get_unused_fd();
305 if (ret < 0)
306 goto out_dput;
307
308 dentry->d_inode->i_nlink++;
309
310 filp = filp_open(name, O_RDONLY, mode);
311 if (IS_ERR(filp)) {
312 // FIXME: remove directory again
313 put_unused_fd(ret);
314 ret = PTR_ERR(filp);
315 } else {
316 filp->f_op = &spufs_autodelete_dir_operations;
317 fd_install(ret, filp);
318 }
319
320out_dput:
321 dput(dentry);
322out_dir:
323 up(&nd->dentry->d_inode->i_sem);
324out:
325 return ret;
326}
327
328/* File system initialization */
329enum {
330 Opt_uid, Opt_gid, Opt_err,
331};
332
333static match_table_t spufs_tokens = {
334 { Opt_uid, "uid=%d" },
335 { Opt_gid, "gid=%d" },
336 { Opt_err, NULL },
337};
338
339static int
340spufs_parse_options(char *options, struct inode *root)
341{
342 char *p;
343 substring_t args[MAX_OPT_ARGS];
344
345 while ((p = strsep(&options, ",")) != NULL) {
346 int token, option;
347
348 if (!*p)
349 continue;
350
351 token = match_token(p, spufs_tokens, args);
352 switch (token) {
353 case Opt_uid:
354 if (match_int(&args[0], &option))
355 return 0;
356 root->i_uid = option;
357 break;
358 case Opt_gid:
359 if (match_int(&args[0], &option))
360 return 0;
361 root->i_gid = option;
362 break;
363 default:
364 return 0;
365 }
366 }
367 return 1;
368}
369
370static int
371spufs_create_root(struct super_block *sb, void *data) {
372 struct inode *inode;
373 int ret;
374
375 ret = -ENOMEM;
376 inode = spufs_new_inode(sb, S_IFDIR | 0775);
377 if (!inode)
378 goto out;
379
380 inode->i_op = &spufs_dir_inode_operations;
381 inode->i_fop = &simple_dir_operations;
382 SPUFS_I(inode)->i_ctx = NULL;
383
384 ret = -EINVAL;
385 if (!spufs_parse_options(data, inode))
386 goto out_iput;
387
388 ret = -ENOMEM;
389 sb->s_root = d_alloc_root(inode);
390 if (!sb->s_root)
391 goto out_iput;
392
393 return 0;
394out_iput:
395 iput(inode);
396out:
397 return ret;
398}
399
400static int
401spufs_fill_super(struct super_block *sb, void *data, int silent)
402{
403 static struct super_operations s_ops = {
404 .alloc_inode = spufs_alloc_inode,
405 .destroy_inode = spufs_destroy_inode,
406 .statfs = simple_statfs,
407 .delete_inode = spufs_delete_inode,
408 .drop_inode = generic_delete_inode,
409 };
410
411 sb->s_maxbytes = MAX_LFS_FILESIZE;
412 sb->s_blocksize = PAGE_CACHE_SIZE;
413 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
414 sb->s_magic = SPUFS_MAGIC;
415 sb->s_op = &s_ops;
416
417 return spufs_create_root(sb, data);
418}
419
420static struct super_block *
421spufs_get_sb(struct file_system_type *fstype, int flags,
422 const char *name, void *data)
423{
424 return get_sb_single(fstype, flags, data, spufs_fill_super);
425}
426
427static struct file_system_type spufs_type = {
428 .owner = THIS_MODULE,
429 .name = "spufs",
430 .get_sb = spufs_get_sb,
431 .kill_sb = kill_litter_super,
432};
433
434static int spufs_init(void)
435{
436 int ret;
437 ret = -ENOMEM;
438 spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
439 sizeof(struct spufs_inode_info), 0,
440 SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
441
442 if (!spufs_inode_cache)
443 goto out;
444 ret = register_filesystem(&spufs_type);
445 if (ret)
446 goto out_cache;
447 ret = register_spu_syscalls(&spufs_calls);
448 if (ret)
449 goto out_fs;
450 return 0;
451out_fs:
452 unregister_filesystem(&spufs_type);
453out_cache:
454 kmem_cache_destroy(spufs_inode_cache);
455out:
456 return ret;
457}
458module_init(spufs_init);
459
460static void spufs_exit(void)
461{
462 unregister_spu_syscalls(&spufs_calls);
463 unregister_filesystem(&spufs_type);
464 kmem_cache_destroy(spufs_inode_cache);
465}
466module_exit(spufs_exit);
467
468MODULE_LICENSE("GPL");
469MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
470
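
The uid= and gid= options handled by spufs_parse_options above are passed as mount data; a minimal sketch using mount(2) follows (the /spu target directory and the ids are illustrative, not mandated by the patch).

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* "spufs" matches spufs_type.name registered above; /spu is only an example mount point */
            if (mount("none", "/spu", "spufs", 0, "uid=1000,gid=1000") != 0)
                    perror("mount spufs");
            return 0;
    }
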
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
new file mode 100644
index 000000000000..b37fe797ea1c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -0,0 +1,71 @@
1/*
2 * SPU file system
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22#ifndef SPUFS_H
23#define SPUFS_H
24
25#include <linux/kref.h>
26#include <linux/rwsem.h>
27#include <linux/spinlock.h>
28#include <linux/fs.h>
29
30#include <asm/spu.h>
31
32/* The magic number for our file system */
33enum {
34 SPUFS_MAGIC = 0x23c9b64e,
35};
36
37struct spu_context {
38 struct spu *spu; /* pointer to a physical SPU */
39 struct rw_semaphore backing_sema; /* protects the above */
40 spinlock_t mmio_lock; /* protects mmio access */
41
42 struct kref kref;
43};
44
45struct spufs_inode_info {
46 struct spu_context *i_ctx;
47 struct inode vfs_inode;
48};
49#define SPUFS_I(inode) \
50 container_of(inode, struct spufs_inode_info, vfs_inode)
51
52extern struct tree_descr spufs_dir_contents[];
53
54/* system call implementation */
55long spufs_run_spu(struct file *file,
56 struct spu_context *ctx, u32 *npc, u32 *status);
57long spufs_create_thread(struct nameidata *nd, const char *name,
58 unsigned int flags, mode_t mode);
59
60/* context management */
61struct spu_context * alloc_spu_context(void);
62void destroy_spu_context(struct kref *kref);
63struct spu_context * get_spu_context(struct spu_context *ctx);
64int put_spu_context(struct spu_context *ctx);
65
66void spu_acquire(struct spu_context *ctx);
67void spu_release(struct spu_context *ctx);
68void spu_acquire_runnable(struct spu_context *ctx);
69void spu_acquire_saved(struct spu_context *ctx);
70
71#endif
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
new file mode 100644
index 000000000000..3f71bb5e9d8e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -0,0 +1,106 @@
1#include <linux/file.h>
2#include <linux/fs.h>
3#include <linux/module.h>
4#include <linux/mount.h>
5#include <linux/namei.h>
6
7#include <asm/uaccess.h>
8
9#include "spufs.h"
10
11/**
12 * sys_spu_run - run code loaded into an SPU
13 *
14 * @unpc: next program counter for the SPU
15 * @ustatus: status of the SPU
16 *
17 * This system call transfers control of execution from a
18 * user space thread to an SPU. It returns when the SPU has
19 * finished executing or hits an error condition, and it is
20 * interrupted if a signal needs to be delivered to a handler
21 * in user space.
22 *
23 * The next program counter is set to the passed value
24 * before the SPU starts fetching code and the user space
25 * pointer gets updated with the new value when returning
26 * from kernel space.
27 *
28 * The status value returned from spu_run reflects the
29 * value of the spu_status register after the SPU has stopped.
30 *
31 */
32long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus)
33{
34 long ret;
35 struct spufs_inode_info *i;
36 u32 npc, status;
37
38 ret = -EFAULT;
39 if (get_user(npc, unpc))
40 goto out;
41
42 ret = -EINVAL;
43 if (filp->f_vfsmnt->mnt_sb->s_magic != SPUFS_MAGIC)
44 goto out;
45
46 i = SPUFS_I(filp->f_dentry->d_inode);
47 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
48
49	if (ret == -EAGAIN || ret == -EIO)
50 ret = status;
51
52 if (put_user(npc, unpc))
53 ret = -EFAULT;
54
55 if (ustatus && put_user(status, ustatus))
56 ret = -EFAULT;
57out:
58 return ret;
59}
60
61#ifndef MODULE
62asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
63{
64 int fput_needed;
65 struct file *filp;
66 long ret;
67
68 ret = -EBADF;
69 filp = fget_light(fd, &fput_needed);
70 if (filp) {
71 ret = do_spu_run(filp, unpc, ustatus);
72 fput_light(filp, fput_needed);
73 }
74
75 return ret;
76}
77#endif
78
79asmlinkage long sys_spu_create(const char __user *pathname,
80 unsigned int flags, mode_t mode)
81{
82 char *tmp;
83 int ret;
84
85 tmp = getname(pathname);
86 ret = PTR_ERR(tmp);
87 if (!IS_ERR(tmp)) {
88 struct nameidata nd;
89
90 ret = path_lookup(tmp, LOOKUP_PARENT|
91 LOOKUP_OPEN|LOOKUP_CREATE, &nd);
92 if (!ret) {
93			ret = spufs_create_thread(&nd, tmp, flags, mode);
94 path_release(&nd);
95 }
96 putname(tmp);
97 }
98
99 return ret;
100}
101
102struct spufs_calls spufs_calls = {
103 .create_thread = sys_spu_create,
104 .spu_run = do_spu_run,
105 .owner = THIS_MODULE,
106};
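
Since no libc wrappers exist for the new calls, user space reaches spu_create and spu_run through syscall(2). A minimal sketch follows; the syscall numbers are taken from the unistd.h hunk further down, and the /spu/example path is illustrative only.

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_spu_create 279     /* from include/asm-powerpc/unistd.h below */
    #define __NR_spu_run    278

    int main(void)
    {
            unsigned int npc = 0, status = 0;
            /* creates a context directory under a mounted spufs; the path is an example */
            long fd = syscall(__NR_spu_create, "/spu/example", 0, 0755);

            if (fd < 0) {
                    perror("spu_create");
                    return 1;
            }
            /* ... load code into /spu/example/mem and set npc before running ... */
            if (syscall(__NR_spu_run, (int)fd, &npc, &status) < 0)
                    perror("spu_run");
            printf("npc=0x%x status=0x%x\n", npc, status);
            return 0;
    }
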
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 28f1082e5040..95075f99a6d4 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -307,7 +307,6 @@ EXPORT_SYMBOL(__res);
307 307
308EXPORT_SYMBOL(next_mmu_context); 308EXPORT_SYMBOL(next_mmu_context);
309EXPORT_SYMBOL(set_context); 309EXPORT_SYMBOL(set_context);
310EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
311EXPORT_SYMBOL(disarm_decr); 310EXPORT_SYMBOL(disarm_decr);
312#ifdef CONFIG_PPC_STD_MMU 311#ifdef CONFIG_PPC_STD_MMU
313extern long mol_trampoline; 312extern long mol_trampoline;
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
new file mode 100644
index 000000000000..b036385cd831
--- /dev/null
+++ b/include/asm-powerpc/spu.h
@@ -0,0 +1,498 @@
1/*
2 * SPU core / file system interface and HW structures
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#ifndef _SPU_H
24#define _SPU_H
25#include <linux/config.h>
26#include <linux/kref.h>
27#include <linux/workqueue.h>
28
29#define LS_ORDER (6) /* 256 kb */
30
31#define LS_SIZE (PAGE_SIZE << LS_ORDER)
32
33struct spu {
34 char *name;
35 unsigned long local_store_phys;
36 u8 *local_store;
37 struct spu_problem __iomem *problem;
38 struct spu_priv1 __iomem *priv1;
39 struct spu_priv2 __iomem *priv2;
40 struct list_head list;
41 int number;
42 u32 isrc;
43 u32 node;
44 struct kref kref;
45 size_t ls_size;
46 unsigned int slb_replace;
47 struct mm_struct *mm;
48 int class_0_pending;
49 spinlock_t register_lock;
50
51 u32 stop_code;
52 wait_queue_head_t stop_wq;
53 wait_queue_head_t ibox_wq;
54 wait_queue_head_t wbox_wq;
55 struct fasync_struct *ibox_fasync;
56 struct fasync_struct *wbox_fasync;
57
58 char irq_c0[8];
59 char irq_c1[8];
60 char irq_c2[8];
61};
62
63struct spu *spu_alloc(void);
64void spu_free(struct spu *spu);
65int spu_run(struct spu *spu);
66
67size_t spu_wbox_write(struct spu *spu, u32 data);
68size_t spu_ibox_read(struct spu *spu, u32 *data);
69
70extern struct spufs_calls {
71 asmlinkage long (*create_thread)(const char __user *name,
72 unsigned int flags, mode_t mode);
73 asmlinkage long (*spu_run)(struct file *filp, __u32 __user *unpc,
74 __u32 __user *ustatus);
75 struct module *owner;
76} spufs_calls;
77
78#ifdef CONFIG_SPU_FS_MODULE
79int register_spu_syscalls(struct spufs_calls *calls);
80void unregister_spu_syscalls(struct spufs_calls *calls);
81#else
82static inline int register_spu_syscalls(struct spufs_calls *calls)
83{
84 return 0;
85}
86static inline void unregister_spu_syscalls(struct spufs_calls *calls)
87{
88}
89#endif /* MODULE */
90
91
92/*
93 * This defines the Local Store, Problem Area and Privilege Area of an SPU.
94 */
95
96union mfc_tag_size_class_cmd {
97 struct {
98 u16 mfc_size;
99 u16 mfc_tag;
100 u8 pad;
101 u8 mfc_rclassid;
102 u16 mfc_cmd;
103 } u;
104 struct {
105 u32 mfc_size_tag32;
106 u32 mfc_class_cmd32;
107 } by32;
108 u64 all64;
109};
110
111struct mfc_cq_sr {
112 u64 mfc_cq_data0_RW;
113 u64 mfc_cq_data1_RW;
114 u64 mfc_cq_data2_RW;
115 u64 mfc_cq_data3_RW;
116};
117
118struct spu_problem {
119#define MS_SYNC_PENDING 1L
120 u64 spc_mssync_RW; /* 0x0000 */
121 u8 pad_0x0008_0x3000[0x3000 - 0x0008];
122
123 /* DMA Area */
124 u8 pad_0x3000_0x3004[0x4]; /* 0x3000 */
125 u32 mfc_lsa_W; /* 0x3004 */
126 u64 mfc_ea_W; /* 0x3008 */
127 union mfc_tag_size_class_cmd mfc_union_W; /* 0x3010 */
128 u8 pad_0x3018_0x3104[0xec]; /* 0x3018 */
129 u32 dma_qstatus_R; /* 0x3104 */
130 u8 pad_0x3108_0x3204[0xfc]; /* 0x3108 */
131 u32 dma_querytype_RW; /* 0x3204 */
132 u8 pad_0x3208_0x321c[0x14]; /* 0x3208 */
133 u32 dma_querymask_RW; /* 0x321c */
134 u8 pad_0x3220_0x322c[0xc]; /* 0x3220 */
135 u32 dma_tagstatus_R; /* 0x322c */
136#define DMA_TAGSTATUS_INTR_ANY 1u
137#define DMA_TAGSTATUS_INTR_ALL 2u
138 u8 pad_0x3230_0x4000[0x4000 - 0x3230]; /* 0x3230 */
139
140 /* SPU Control Area */
141 u8 pad_0x4000_0x4004[0x4]; /* 0x4000 */
142 u32 pu_mb_R; /* 0x4004 */
143 u8 pad_0x4008_0x400c[0x4]; /* 0x4008 */
144 u32 spu_mb_W; /* 0x400c */
145 u8 pad_0x4010_0x4014[0x4]; /* 0x4010 */
146 u32 mb_stat_R; /* 0x4014 */
147 u8 pad_0x4018_0x401c[0x4]; /* 0x4018 */
148 u32 spu_runcntl_RW; /* 0x401c */
149#define SPU_RUNCNTL_STOP 0L
150#define SPU_RUNCNTL_RUNNABLE 1L
151 u8 pad_0x4020_0x4024[0x4]; /* 0x4020 */
152 u32 spu_status_R; /* 0x4024 */
153#define SPU_STOP_STATUS_SHIFT 16
154#define SPU_STATUS_STOPPED 0x0
155#define SPU_STATUS_RUNNING 0x1
156#define SPU_STATUS_STOPPED_BY_STOP 0x2
157#define SPU_STATUS_STOPPED_BY_HALT 0x4
158#define SPU_STATUS_WAITING_FOR_CHANNEL 0x8
159#define SPU_STATUS_SINGLE_STEP 0x10
160#define SPU_STATUS_INVALID_INSTR 0x20
161#define SPU_STATUS_INVALID_CH 0x40
162#define SPU_STATUS_ISOLATED_STATE 0x80
163#define SPU_STATUS_ISOLATED_LOAD_STATUS	0x200
164#define SPU_STATUS_ISOLATED_EXIT_STATUS	0x400
165 u8 pad_0x4028_0x402c[0x4]; /* 0x4028 */
166 u32 spu_spe_R; /* 0x402c */
167 u8 pad_0x4030_0x4034[0x4]; /* 0x4030 */
168 u32 spu_npc_RW; /* 0x4034 */
169 u8 pad_0x4038_0x14000[0x14000 - 0x4038]; /* 0x4038 */
170
171 /* Signal Notification Area */
172 u8 pad_0x14000_0x1400c[0xc]; /* 0x14000 */
173 u32 signal_notify1; /* 0x1400c */
174 u8 pad_0x14010_0x1c00c[0x7ffc]; /* 0x14010 */
175 u32 signal_notify2; /* 0x1c00c */
176} __attribute__ ((aligned(0x20000)));
177
178/* SPU Privilege 2 State Area */
179struct spu_priv2 {
180 /* MFC Registers */
181 u8 pad_0x0000_0x1100[0x1100 - 0x0000]; /* 0x0000 */
182
183 /* SLB Management Registers */
184 u8 pad_0x1100_0x1108[0x8]; /* 0x1100 */
185 u64 slb_index_W; /* 0x1108 */
186#define SLB_INDEX_MASK 0x7L
187 u64 slb_esid_RW; /* 0x1110 */
188 u64 slb_vsid_RW; /* 0x1118 */
189#define SLB_VSID_SUPERVISOR_STATE (0x1ull << 11)
190#define SLB_VSID_SUPERVISOR_STATE_MASK (0x1ull << 11)
191#define SLB_VSID_PROBLEM_STATE (0x1ull << 10)
192#define SLB_VSID_PROBLEM_STATE_MASK (0x1ull << 10)
193#define SLB_VSID_EXECUTE_SEGMENT (0x1ull << 9)
194#define SLB_VSID_NO_EXECUTE_SEGMENT (0x1ull << 9)
195#define SLB_VSID_EXECUTE_SEGMENT_MASK (0x1ull << 9)
196#define SLB_VSID_4K_PAGE (0x0 << 8)
197#define SLB_VSID_LARGE_PAGE (0x1ull << 8)
198#define SLB_VSID_PAGE_SIZE_MASK (0x1ull << 8)
199#define SLB_VSID_CLASS_MASK (0x1ull << 7)
200#define SLB_VSID_VIRTUAL_PAGE_SIZE_MASK (0x1ull << 6)
201 u64 slb_invalidate_entry_W; /* 0x1120 */
202 u64 slb_invalidate_all_W; /* 0x1128 */
203 u8 pad_0x1130_0x2000[0x2000 - 0x1130]; /* 0x1130 */
204
205 /* Context Save / Restore Area */
206 struct mfc_cq_sr spuq[16]; /* 0x2000 */
207 struct mfc_cq_sr puq[8]; /* 0x2200 */
208 u8 pad_0x2300_0x3000[0x3000 - 0x2300]; /* 0x2300 */
209
210 /* MFC Control */
211 u64 mfc_control_RW; /* 0x3000 */
212#define MFC_CNTL_RESUME_DMA_QUEUE (0ull << 0)
213#define MFC_CNTL_SUSPEND_DMA_QUEUE (1ull << 0)
214#define MFC_CNTL_SUSPEND_DMA_QUEUE_MASK (1ull << 0)
215#define MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION (0ull << 8)
216#define MFC_CNTL_SUSPEND_IN_PROGRESS (1ull << 8)
217#define MFC_CNTL_SUSPEND_COMPLETE (3ull << 8)
218#define MFC_CNTL_SUSPEND_DMA_STATUS_MASK (3ull << 8)
219#define MFC_CNTL_DMA_QUEUES_EMPTY (1ull << 14)
220#define MFC_CNTL_DMA_QUEUES_EMPTY_MASK (1ull << 14)
221#define MFC_CNTL_PURGE_DMA_REQUEST (1ull << 15)
222#define MFC_CNTL_PURGE_DMA_IN_PROGRESS (1ull << 24)
223#define MFC_CNTL_PURGE_DMA_COMPLETE (3ull << 24)
224#define MFC_CNTL_PURGE_DMA_STATUS_MASK (3ull << 24)
225#define MFC_CNTL_RESTART_DMA_COMMAND (1ull << 32)
226#define MFC_CNTL_DMA_COMMAND_REISSUE_PENDING (1ull << 32)
227#define MFC_CNTL_DMA_COMMAND_REISSUE_STATUS_MASK (1ull << 32)
228#define MFC_CNTL_MFC_PRIVILEGE_STATE (2ull << 33)
229#define MFC_CNTL_MFC_PROBLEM_STATE (3ull << 33)
230#define MFC_CNTL_MFC_KEY_PROTECTION_STATE_MASK (3ull << 33)
231#define MFC_CNTL_DECREMENTER_HALTED (1ull << 35)
232#define MFC_CNTL_DECREMENTER_RUNNING (1ull << 40)
233#define MFC_CNTL_DECREMENTER_STATUS_MASK (1ull << 40)
234 u8 pad_0x3008_0x4000[0x4000 - 0x3008]; /* 0x3008 */
235
236 /* Interrupt Mailbox */
237 u64 puint_mb_R; /* 0x4000 */
238 u8 pad_0x4008_0x4040[0x4040 - 0x4008]; /* 0x4008 */
239
240 /* SPU Control */
241 u64 spu_privcntl_RW; /* 0x4040 */
242#define SPU_PRIVCNTL_MODE_NORMAL (0x0ull << 0)
243#define SPU_PRIVCNTL_MODE_SINGLE_STEP (0x1ull << 0)
244#define SPU_PRIVCNTL_MODE_MASK (0x1ull << 0)
245#define SPU_PRIVCNTL_NO_ATTENTION_EVENT (0x0ull << 1)
246#define SPU_PRIVCNTL_ATTENTION_EVENT (0x1ull << 1)
247#define SPU_PRIVCNTL_ATTENTION_EVENT_MASK (0x1ull << 1)
248#define SPU_PRIVCNT_LOAD_REQUEST_NORMAL (0x0ull << 2)
249#define SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK (0x1ull << 2)
250 u8 pad_0x4048_0x4058[0x10]; /* 0x4048 */
251 u64 spu_lslr_RW; /* 0x4058 */
252 u64 spu_chnlcntptr_RW; /* 0x4060 */
253 u64 spu_chnlcnt_RW; /* 0x4068 */
254 u64 spu_chnldata_RW; /* 0x4070 */
255 u64 spu_cfg_RW; /* 0x4078 */
256 u8 pad_0x4080_0x5000[0x5000 - 0x4080]; /* 0x4080 */
257
258 /* PV2_ImplRegs: Implementation-specific privileged-state 2 regs */
259 u64 spu_pm_trace_tag_status_RW; /* 0x5000 */
260 u64 spu_tag_status_query_RW; /* 0x5008 */
261#define TAG_STATUS_QUERY_CONDITION_BITS (0x3ull << 32)
262#define TAG_STATUS_QUERY_MASK_BITS (0xffffffffull)
263 u64 spu_cmd_buf1_RW; /* 0x5010 */
264#define SPU_COMMAND_BUFFER_1_LSA_BITS (0x7ffffull << 32)
265#define SPU_COMMAND_BUFFER_1_EAH_BITS (0xffffffffull)
266 u64 spu_cmd_buf2_RW; /* 0x5018 */
267#define SPU_COMMAND_BUFFER_2_EAL_BITS ((0xffffffffull) << 32)
268#define SPU_COMMAND_BUFFER_2_TS_BITS (0xffffull << 16)
269#define SPU_COMMAND_BUFFER_2_TAG_BITS (0x3full)
270 u64 spu_atomic_status_RW; /* 0x5020 */
271} __attribute__ ((aligned(0x20000)));
272
273/* SPU Privilege 1 State Area */
274struct spu_priv1 {
275 /* Control and Configuration Area */
276 u64 mfc_sr1_RW; /* 0x000 */
277#define MFC_STATE1_LOCAL_STORAGE_DECODE_MASK 0x01ull
278#define MFC_STATE1_BUS_TLBIE_MASK 0x02ull
279#define MFC_STATE1_REAL_MODE_OFFSET_ENABLE_MASK 0x04ull
280#define MFC_STATE1_PROBLEM_STATE_MASK 0x08ull
281#define MFC_STATE1_RELOCATE_MASK 0x10ull
282#define MFC_STATE1_MASTER_RUN_CONTROL_MASK 0x20ull
283 u64 mfc_lpid_RW; /* 0x008 */
284 u64 spu_idr_RW; /* 0x010 */
285 u64 mfc_vr_RO; /* 0x018 */
286#define MFC_VERSION_BITS (0xffff << 16)
287#define MFC_REVISION_BITS (0xffff)
288#define MFC_GET_VERSION_BITS(vr) (((vr) & MFC_VERSION_BITS) >> 16)
289#define MFC_GET_REVISION_BITS(vr) ((vr) & MFC_REVISION_BITS)
290 u64 spu_vr_RO; /* 0x020 */
291#define SPU_VERSION_BITS (0xffff << 16)
292#define SPU_REVISION_BITS (0xffff)
293#define SPU_GET_VERSION_BITS(vr)	(((vr) & SPU_VERSION_BITS) >> 16)
294#define SPU_GET_REVISION_BITS(vr)	((vr) & SPU_REVISION_BITS)
295 u8 pad_0x28_0x100[0x100 - 0x28]; /* 0x28 */
296
297
298 /* Interrupt Area */
299 u64 int_mask_class0_RW; /* 0x100 */
300#define CLASS0_ENABLE_DMA_ALIGNMENT_INTR 0x1L
301#define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR 0x2L
302#define CLASS0_ENABLE_SPU_ERROR_INTR 0x4L
303#define CLASS0_ENABLE_MFC_FIR_INTR 0x8L
304 u64 int_mask_class1_RW; /* 0x108 */
305#define CLASS1_ENABLE_SEGMENT_FAULT_INTR 0x1L
306#define CLASS1_ENABLE_STORAGE_FAULT_INTR 0x2L
307#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR 0x4L
308#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR 0x8L
309 u64 int_mask_class2_RW; /* 0x110 */
310#define CLASS2_ENABLE_MAILBOX_INTR 0x1L
311#define CLASS2_ENABLE_SPU_STOP_INTR 0x2L
312#define CLASS2_ENABLE_SPU_HALT_INTR 0x4L
313#define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR 0x8L
314 u8 pad_0x118_0x140[0x28]; /* 0x118 */
315 u64 int_stat_class0_RW; /* 0x140 */
316 u64 int_stat_class1_RW; /* 0x148 */
317 u64 int_stat_class2_RW; /* 0x150 */
318 u8 pad_0x158_0x180[0x28]; /* 0x158 */
319 u64 int_route_RW; /* 0x180 */
320
321 /* Interrupt Routing */
322 u8 pad_0x188_0x200[0x200 - 0x188]; /* 0x188 */
323
324 /* Atomic Unit Control Area */
325 u64 mfc_atomic_flush_RW; /* 0x200 */
326#define mfc_atomic_flush_enable 0x1L
327 u8 pad_0x208_0x280[0x78]; /* 0x208 */
328 u64 resource_allocation_groupID_RW; /* 0x280 */
329 u64 resource_allocation_enable_RW; /* 0x288 */
330 u8 pad_0x290_0x3c8[0x3c8 - 0x290]; /* 0x290 */
331
332 /* SPU_Cache_ImplRegs: Implementation-dependent cache registers */
333
334 u64 smf_sbi_signal_sel; /* 0x3c8 */
335#define smf_sbi_mask_lsb 56
336#define smf_sbi_shift (63 - smf_sbi_mask_lsb)
337#define smf_sbi_mask (0x301LL << smf_sbi_shift)
338#define smf_sbi_bus0_bits (0x001LL << smf_sbi_shift)
339#define smf_sbi_bus2_bits (0x100LL << smf_sbi_shift)
340#define smf_sbi2_bus0_bits (0x201LL << smf_sbi_shift)
341#define smf_sbi2_bus2_bits (0x300LL << smf_sbi_shift)
342 u64 smf_ato_signal_sel; /* 0x3d0 */
343#define smf_ato_mask_lsb 35
344#define smf_ato_shift (63 - smf_ato_mask_lsb)
345#define smf_ato_mask (0x3LL << smf_ato_shift)
346#define smf_ato_bus0_bits (0x2LL << smf_ato_shift)
347#define smf_ato_bus2_bits (0x1LL << smf_ato_shift)
348 u8 pad_0x3d8_0x400[0x400 - 0x3d8]; /* 0x3d8 */
349
350 /* TLB Management Registers */
351 u64 mfc_sdr_RW; /* 0x400 */
352 u8 pad_0x408_0x500[0xf8]; /* 0x408 */
353 u64 tlb_index_hint_RO; /* 0x500 */
354 u64 tlb_index_W; /* 0x508 */
355 u64 tlb_vpn_RW; /* 0x510 */
356 u64 tlb_rpn_RW; /* 0x518 */
357 u8 pad_0x520_0x540[0x20]; /* 0x520 */
358 u64 tlb_invalidate_entry_W; /* 0x540 */
359 u64 tlb_invalidate_all_W; /* 0x548 */
360 u8 pad_0x550_0x580[0x580 - 0x550]; /* 0x550 */
361
362 /* SPU_MMU_ImplRegs: Implementation-dependent MMU registers */
363 u64 smm_hid; /* 0x580 */
364#define PAGE_SIZE_MASK 0xf000000000000000ull
365#define PAGE_SIZE_16MB_64KB 0x2000000000000000ull
366 u8 pad_0x588_0x600[0x600 - 0x588]; /* 0x588 */
367
368 /* MFC Status/Control Area */
369 u64 mfc_accr_RW; /* 0x600 */
370#define MFC_ACCR_EA_ACCESS_GET (1 << 0)
371#define MFC_ACCR_EA_ACCESS_PUT (1 << 1)
372#define MFC_ACCR_LS_ACCESS_GET (1 << 3)
373#define MFC_ACCR_LS_ACCESS_PUT (1 << 4)
374 u8 pad_0x608_0x610[0x8]; /* 0x608 */
375 u64 mfc_dsisr_RW; /* 0x610 */
376#define MFC_DSISR_PTE_NOT_FOUND (1 << 30)
377#define MFC_DSISR_ACCESS_DENIED (1 << 27)
378#define MFC_DSISR_ATOMIC (1 << 26)
379#define MFC_DSISR_ACCESS_PUT (1 << 25)
380#define MFC_DSISR_ADDR_MATCH (1 << 22)
381#define MFC_DSISR_LS (1 << 17)
382#define MFC_DSISR_L (1 << 16)
383#define MFC_DSISR_ADDRESS_OVERFLOW (1 << 0)
384 u8 pad_0x618_0x620[0x8]; /* 0x618 */
385 u64 mfc_dar_RW; /* 0x620 */
386 u8 pad_0x628_0x700[0x700 - 0x628]; /* 0x628 */
387
388 /* Replacement Management Table (RMT) Area */
389 u64 rmt_index_RW; /* 0x700 */
390 u8 pad_0x708_0x710[0x8]; /* 0x708 */
391 u64 rmt_data1_RW; /* 0x710 */
392 u8 pad_0x718_0x800[0x800 - 0x718]; /* 0x718 */
393
394 /* Control/Configuration Registers */
395 u64 mfc_dsir_R; /* 0x800 */
396#define MFC_DSIR_Q (1 << 31)
397#define MFC_DSIR_SPU_QUEUE MFC_DSIR_Q
398 u64 mfc_lsacr_RW; /* 0x808 */
399#define MFC_LSACR_COMPARE_MASK ((~0ull) << 32)
400#define MFC_LSACR_COMPARE_ADDR ((~0ull) >> 32)
401 u64 mfc_lscrr_R; /* 0x810 */
402#define MFC_LSCRR_Q (1 << 31)
403#define MFC_LSCRR_SPU_QUEUE MFC_LSCRR_Q
404#define MFC_LSCRR_QI_SHIFT 32
405#define MFC_LSCRR_QI_MASK ((~0ull) << MFC_LSCRR_QI_SHIFT)
406 u8 pad_0x818_0x820[0x8]; /* 0x818 */
407 u64 mfc_tclass_id_RW; /* 0x820 */
408#define MFC_TCLASS_ID_ENABLE (1L << 0L)
409#define MFC_TCLASS_SLOT2_ENABLE (1L << 5L)
410#define MFC_TCLASS_SLOT1_ENABLE (1L << 6L)
411#define MFC_TCLASS_SLOT0_ENABLE (1L << 7L)
412#define MFC_TCLASS_QUOTA_2_SHIFT 8L
413#define MFC_TCLASS_QUOTA_1_SHIFT 16L
414#define MFC_TCLASS_QUOTA_0_SHIFT 24L
415#define MFC_TCLASS_QUOTA_2_MASK (0x1FL << MFC_TCLASS_QUOTA_2_SHIFT)
416#define MFC_TCLASS_QUOTA_1_MASK (0x1FL << MFC_TCLASS_QUOTA_1_SHIFT)
417#define MFC_TCLASS_QUOTA_0_MASK (0x1FL << MFC_TCLASS_QUOTA_0_SHIFT)
418 u8 pad_0x828_0x900[0x900 - 0x828]; /* 0x828 */
419
420 /* Real Mode Support Registers */
421 u64 mfc_rm_boundary; /* 0x900 */
422 u8 pad_0x908_0x938[0x30]; /* 0x908 */
423 u64 smf_dma_signal_sel; /* 0x938 */
424#define mfc_dma1_mask_lsb 41
425#define mfc_dma1_shift (63 - mfc_dma1_mask_lsb)
426#define mfc_dma1_mask (0x3LL << mfc_dma1_shift)
427#define mfc_dma1_bits (0x1LL << mfc_dma1_shift)
428#define mfc_dma2_mask_lsb 43
429#define mfc_dma2_shift (63 - mfc_dma2_mask_lsb)
430#define mfc_dma2_mask (0x3LL << mfc_dma2_shift)
431#define mfc_dma2_bits (0x1LL << mfc_dma2_shift)
432 u8 pad_0x940_0xa38[0xf8]; /* 0x940 */
433 u64 smm_signal_sel; /* 0xa38 */
434#define smm_sig_mask_lsb 12
435#define smm_sig_shift (63 - smm_sig_mask_lsb)
436#define smm_sig_mask (0x3LL << smm_sig_shift)
437#define smm_sig_bus0_bits (0x2LL << smm_sig_shift)
438#define smm_sig_bus2_bits (0x1LL << smm_sig_shift)
439 u8 pad_0xa40_0xc00[0xc00 - 0xa40]; /* 0xa40 */
440
441 /* DMA Command Error Area */
442 u64 mfc_cer_R; /* 0xc00 */
443#define MFC_CER_Q (1 << 31)
444#define MFC_CER_SPU_QUEUE MFC_CER_Q
445 u8 pad_0xc08_0x1000[0x1000 - 0xc08]; /* 0xc08 */
446
447 /* PV1_ImplRegs: Implementation-dependent privileged-state 1 regs */
448 /* DMA Command Error Area */
449 u64 spu_ecc_cntl_RW; /* 0x1000 */
450#define SPU_ECC_CNTL_E (1ull << 0ull)
451#define SPU_ECC_CNTL_ENABLE SPU_ECC_CNTL_E
452#define SPU_ECC_CNTL_DISABLE (~SPU_ECC_CNTL_E & 1L)
453#define SPU_ECC_CNTL_S (1ull << 1ull)
454#define SPU_ECC_STOP_AFTER_ERROR SPU_ECC_CNTL_S
455#define SPU_ECC_CONTINUE_AFTER_ERROR (~SPU_ECC_CNTL_S & 2L)
456#define SPU_ECC_CNTL_B (1ull << 2ull)
457#define SPU_ECC_BACKGROUND_ENABLE SPU_ECC_CNTL_B
458#define SPU_ECC_BACKGROUND_DISABLE (~SPU_ECC_CNTL_B & 4L)
459#define SPU_ECC_CNTL_I_SHIFT 3ull
460#define SPU_ECC_CNTL_I_MASK (3ull << SPU_ECC_CNTL_I_SHIFT)
461#define SPU_ECC_WRITE_ALWAYS (~SPU_ECC_CNTL_I & 12L)
462#define SPU_ECC_WRITE_CORRECTABLE (1ull << SPU_ECC_CNTL_I_SHIFT)
463#define SPU_ECC_WRITE_UNCORRECTABLE (3ull << SPU_ECC_CNTL_I_SHIFT)
464#define SPU_ECC_CNTL_D (1ull << 5ull)
465#define SPU_ECC_DETECTION_ENABLE SPU_ECC_CNTL_D
466#define SPU_ECC_DETECTION_DISABLE (~SPU_ECC_CNTL_D & 32L)
467 u64 spu_ecc_stat_RW; /* 0x1008 */
468#define SPU_ECC_CORRECTED_ERROR (1ull << 0ul)
469#define SPU_ECC_UNCORRECTED_ERROR (1ull << 1ul)
470#define SPU_ECC_SCRUB_COMPLETE (1ull << 2ul)
471#define SPU_ECC_SCRUB_IN_PROGRESS (1ull << 3ul)
472#define SPU_ECC_INSTRUCTION_ERROR (1ull << 4ul)
473#define SPU_ECC_DATA_ERROR (1ull << 5ul)
474#define SPU_ECC_DMA_ERROR (1ull << 6ul)
475#define SPU_ECC_STATUS_CNT_MASK (256ull << 8)
476 u64 spu_ecc_addr_RW; /* 0x1010 */
477 u64 spu_err_mask_RW; /* 0x1018 */
478#define SPU_ERR_ILLEGAL_INSTR (1ull << 0ul)
479#define SPU_ERR_ILLEGAL_CHANNEL (1ull << 1ul)
480 u8 pad_0x1020_0x1028[0x1028 - 0x1020]; /* 0x1020 */
481
482 /* SPU Debug-Trace Bus (DTB) Selection Registers */
483 u64 spu_trig0_sel; /* 0x1028 */
484 u64 spu_trig1_sel; /* 0x1030 */
485 u64 spu_trig2_sel; /* 0x1038 */
486 u64 spu_trig3_sel; /* 0x1040 */
487 u64 spu_trace_sel; /* 0x1048 */
488#define spu_trace_sel_mask 0x1f1fLL
489#define spu_trace_sel_bus0_bits 0x1000LL
490#define spu_trace_sel_bus2_bits 0x0010LL
491 u64 spu_event0_sel; /* 0x1050 */
492 u64 spu_event1_sel; /* 0x1058 */
493 u64 spu_event2_sel; /* 0x1060 */
494 u64 spu_event3_sel; /* 0x1068 */
495 u64 spu_trace_cntl; /* 0x1070 */
496} __attribute__ ((aligned(0x2000)));
497
498#endif
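
The SPU_STATUS_* bits defined in this header are what spufs_run_spu copies back into *status. A small decoding sketch follows; it is not part of the patch, and the constant values are copied from the spu_status_R defines above.

    #include <stdio.h>

    /* Values copied from the spu_status_R defines above. */
    #define SPU_STOP_STATUS_SHIFT         16
    #define SPU_STATUS_RUNNING            0x1
    #define SPU_STATUS_STOPPED_BY_STOP    0x2
    #define SPU_STATUS_STOPPED_BY_HALT    0x4
    #define SPU_STATUS_INVALID_INSTR      0x20
    #define SPU_STATUS_INVALID_CH         0x40

    /* Interpret a status word as returned by spu_run / read from spu_status_R. */
    void decode_spu_status(unsigned int status)
    {
            if (status & SPU_STATUS_RUNNING)
                    printf("SPU running\n");
            if (status & SPU_STATUS_STOPPED_BY_STOP)
                    printf("stop-and-signal, code 0x%x\n",
                           status >> SPU_STOP_STATUS_SHIFT);
            if (status & SPU_STATUS_STOPPED_BY_HALT)
                    printf("halted\n");
            if (status & (SPU_STATUS_INVALID_INSTR | SPU_STATUS_INVALID_CH))
                    printf("invalid instruction or channel\n");
    }
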
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 0991dfceef1d..9606349855da 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -296,6 +296,8 @@
296#define __NR_inotify_init 275 296#define __NR_inotify_init 275
297#define __NR_inotify_add_watch 276 297#define __NR_inotify_add_watch 276
298#define __NR_inotify_rm_watch 277 298#define __NR_inotify_rm_watch 277
299#define __NR_spu_run 278
300#define __NR_spu_create 279
299 301
300#define __NR_syscalls 278 302#define __NR_syscalls 278
301 303
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index c7007b1db91d..44fdd48d38e6 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -512,4 +512,9 @@ asmlinkage long sys_ioprio_get(int which, int who);
512asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, 512asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
513 unsigned long maxnode); 513 unsigned long maxnode);
514 514
515asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
516 __u32 __user *ustatus);
517asmlinkage long sys_spu_create(const char __user *name,
518 unsigned int flags, mode_t mode);
519
515#endif 520#endif
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 1ab2370e2efa..d4739a475d23 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -90,3 +90,5 @@ cond_syscall(sys_pciconfig_iobase);
90cond_syscall(sys32_ipc); 90cond_syscall(sys32_ipc);
91cond_syscall(sys32_sysctl); 91cond_syscall(sys32_sysctl);
92cond_syscall(ppc_rtas); 92cond_syscall(ppc_rtas);
93cond_syscall(sys_spu_run);
94cond_syscall(sys_spu_create);
diff --git a/mm/memory.c b/mm/memory.c
index 7197f9bcd384..3944fec38012 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2267,6 +2267,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2267 return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 2267 return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
2268} 2268}
2269 2269
2270EXPORT_SYMBOL_GPL(__handle_mm_fault);
2271
2270#ifndef __PAGETABLE_PUD_FOLDED 2272#ifndef __PAGETABLE_PUD_FOLDED
2271/* 2273/*
2272 * Allocate page upper directory. 2274 * Allocate page upper directory.