author    Linus Torvalds <torvalds@g5.osdl.org>  2006-10-04 22:01:28 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-10-04 22:01:28 -0400
commit    a43cdf08a1b1ab3c013059b5fa4c1b7561e53cb7 (patch)
tree      1d36874f77855cdfdaf4a86542933b1277162607
parent    97d41e90fe61399b99d74820cb7f2d6e0fbac91d (diff)
parent    43b4f4061cf54aa225a1e94a969450ccf5305cd9 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] cell: fix bugs found by sparse
  [POWERPC] spiderpic: enable new style devtree support
  [POWERPC] Update cell_defconfig
  [POWERPC] spufs: add infrastructure for finding elf objects
  [POWERPC] spufs: support new OF device tree format
  [POWERPC] spufs: add support for read/write on cntl
  [POWERPC] spufs: remove support for ancient firmware
  [POWERPC] spufs: make mailbox functions handle multiple elements
  [POWERPC] spufs: use correct pg_prot for mapping SPU local store
  [POWERPC] spufs: Add infrastructure needed for gang scheduling
  [POWERPC] spufs: implement error event delivery to user space
  [POWERPC] spufs: fix context switch during page fault
  [POWERPC] spufs: scheduler support for NUMA.
  [POWERPC] spufs: cell spu problem state mapping updates
 arch/powerpc/configs/cell_defconfig          |  60
 arch/powerpc/platforms/cell/Kconfig          |   5
 arch/powerpc/platforms/cell/interrupt.c      |  10
 arch/powerpc/platforms/cell/iommu.c          |   8
 arch/powerpc/platforms/cell/spider-pic.c     |   6
 arch/powerpc/platforms/cell/spu_base.c       | 168
 arch/powerpc/platforms/cell/spufs/Makefile   |   2
 arch/powerpc/platforms/cell/spufs/context.c  |   6
 arch/powerpc/platforms/cell/spufs/file.c     | 354
 arch/powerpc/platforms/cell/spufs/gang.c     |  81
 arch/powerpc/platforms/cell/spufs/inode.c    | 232
 arch/powerpc/platforms/cell/spufs/run.c      |  48
 arch/powerpc/platforms/cell/spufs/sched.c    | 450
 arch/powerpc/platforms/cell/spufs/spufs.h    |  29
 arch/powerpc/platforms/cell/spufs/switch.c   |   9
 arch/powerpc/platforms/cell/spufs/syscalls.c |   9
 include/asm-powerpc/spu.h                    |  36
 17 files changed, 1017 insertions(+), 496 deletions(-)
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 6fd9e7acec29..892d5dd3254e 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18-rc6
-# Sun Sep 10 10:20:32 2006
+# Linux kernel version: 2.6.18
+# Wed Oct 4 15:30:50 2006
 #
 CONFIG_PPC64=y
 CONFIG_64BIT=y
@@ -22,6 +22,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 CONFIG_PPC_OF=y
 CONFIG_PPC_UDBG_16550=y
 # CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
 # CONFIG_DEFAULT_UIMAGE is not set
 
 #
@@ -52,10 +53,11 @@ CONFIG_LOCALVERSION=""
 CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-CONFIG_SYSCTL=y
+# CONFIG_UTS_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
@@ -63,7 +65,9 @@ CONFIG_CPUSETS=y
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
+# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -72,12 +76,12 @@ CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
-CONFIG_RT_MUTEXES=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
 CONFIG_SLAB=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -96,6 +100,7 @@ CONFIG_STOP_MACHINE=y
 #
 # Block layer
 #
+CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
 
 #
@@ -115,12 +120,13 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
 # Platform support
 #
 CONFIG_PPC_MULTIPLATFORM=y
-# CONFIG_PPC_ISERIES is not set
 # CONFIG_EMBEDDED6xx is not set
 # CONFIG_APUS is not set
 # CONFIG_PPC_PSERIES is not set
+# CONFIG_PPC_ISERIES is not set
 # CONFIG_PPC_PMAC is not set
 # CONFIG_PPC_MAPLE is not set
+# CONFIG_PPC_PASEMI is not set
 CONFIG_PPC_CELL=y
 CONFIG_PPC_CELL_NATIVE=y
 CONFIG_PPC_IBM_CELL_BLADE=y
@@ -142,7 +148,6 @@ CONFIG_MMIO_NVRAM=y
 #
 CONFIG_SPU_FS=m
 CONFIG_SPU_BASE=y
-CONFIG_SPUFS_MMAP=y
 CONFIG_CBE_RAS=y
 
 #
@@ -158,7 +163,7 @@ CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_BKL=y
 CONFIG_BINFMT_ELF=y
 CONFIG_BINFMT_MISC=m
-CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_FORCE_MAX_ZONEORDER=9
 # CONFIG_IOMMU_VMERGE is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 CONFIG_KEXEC=y
@@ -168,6 +173,7 @@ CONFIG_NUMA=y
 CONFIG_NODES_SHIFT=4
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -178,12 +184,12 @@ CONFIG_HAVE_MEMORY_PRESENT=y
 # CONFIG_SPARSEMEM_STATIC is not set
 CONFIG_SPARSEMEM_EXTREME=y
 CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_MIGRATION=y
 CONFIG_RESOURCES_64BIT=y
-CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
 CONFIG_ARCH_MEMORY_PROBE=y
-# CONFIG_PPC_64K_PAGES is not set
+CONFIG_PPC_64K_PAGES=y
 CONFIG_SCHED_SMT=y
 CONFIG_PROC_DEVICETREE=y
 # CONFIG_CMDLINE_BOOL is not set
@@ -201,6 +207,7 @@ CONFIG_GENERIC_ISA_DMA=y
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 CONFIG_PCIEPORTBUS=y
+# CONFIG_PCI_MULTITHREAD_PROBE is not set
 # CONFIG_PCI_DEBUG is not set
 
 #
@@ -228,6 +235,7 @@ CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 # CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -249,7 +257,8 @@ CONFIG_INET_XFRM_MODE_TUNNEL=y
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
 
 #
 # IP: Virtual Server Configuration
@@ -261,11 +270,15 @@ CONFIG_IPV6=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+# CONFIG_IPV6_MIP6 is not set
 CONFIG_INET6_XFRM_TUNNEL=m
 CONFIG_INET6_TUNNEL=m
 CONFIG_INET6_XFRM_MODE_TRANSPORT=y
 CONFIG_INET6_XFRM_MODE_TUNNEL=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
 CONFIG_IPV6_TUNNEL=m
+# CONFIG_IPV6_SUBTREES is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
 # CONFIG_NETWORK_SECMARK is not set
 CONFIG_NETFILTER=y
 # CONFIG_NETFILTER_DEBUG is not set
@@ -322,7 +335,6 @@ CONFIG_IP_NF_QUEUE=m
 # CONFIG_ATALK is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
 
@@ -434,6 +446,7 @@ CONFIG_BLK_DEV_AEC62XX=y
 # CONFIG_BLK_DEV_CS5530 is not set
 # CONFIG_BLK_DEV_HPT34X is not set
 # CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
 # CONFIG_BLK_DEV_SC1200 is not set
 # CONFIG_BLK_DEV_PIIX is not set
 # CONFIG_BLK_DEV_IT821X is not set
@@ -456,6 +469,12 @@ CONFIG_IDEDMA_AUTO=y
 #
 # CONFIG_RAID_ATTRS is not set
 # CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
 
 #
 # Multi-device support (RAID and LVM)
@@ -470,6 +489,7 @@ CONFIG_MD_RAID1=m
 # CONFIG_MD_MULTIPATH is not set
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
@@ -504,7 +524,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DUMMY is not set
 CONFIG_BONDING=y
 # CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
+CONFIG_TUN=y
 
 #
 # ARCnet devices
@@ -552,7 +572,7 @@ CONFIG_SKGE=m
 # CONFIG_TIGON3 is not set
 # CONFIG_BNX2 is not set
 CONFIG_SPIDER_NET=m
-# CONFIG_MV643XX_ETH is not set
+# CONFIG_QLA3XXX is not set
 
 #
 # Ethernet (10000 Mbit)
@@ -599,6 +619,7 @@ CONFIG_SPIDER_NET=m
 # Input device support
 #
 CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
 
 #
 # Userland interfaces
@@ -865,6 +886,7 @@ CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_INFINIBAND_ADDR_TRANS=y
 CONFIG_INFINIBAND_MTHCA=m
 CONFIG_INFINIBAND_MTHCA_DEBUG=y
+# CONFIG_INFINIBAND_AMSO1100 is not set
 CONFIG_INFINIBAND_IPOIB=m
 CONFIG_INFINIBAND_IPOIB_DEBUG=y
 CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
@@ -916,7 +938,7 @@ CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
+CONFIG_AUTOFS4_FS=m
 # CONFIG_FUSE_FS is not set
 
 #
@@ -943,8 +965,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
 #
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
 CONFIG_HUGETLBFS=y
 CONFIG_HUGETLB_PAGE=y
 CONFIG_RAMFS=y
@@ -1084,6 +1108,7 @@ CONFIG_PLIST=y
 # Kernel hacking
 #
 # CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
@@ -1102,6 +1127,7 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y
 # CONFIG_DEBUG_INFO is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
 # CONFIG_FORCED_INLINING is not set
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_DEBUG_STACKOVERFLOW is not set
@@ -1123,6 +1149,10 @@ CONFIG_IRQSTACKS=y
 # Cryptographic options
 #
 CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_HASH=y
+# CONFIG_CRYPTO_MANAGER is not set
 CONFIG_CRYPTO_HMAC=y
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_MD4 is not set
@@ -1132,6 +1162,8 @@ CONFIG_CRYPTO_SHA1=m
 # CONFIG_CRYPTO_SHA512 is not set
 # CONFIG_CRYPTO_WP512 is not set
 # CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_DES=m
 # CONFIG_CRYPTO_BLOWFISH is not set
 # CONFIG_CRYPTO_TWOFISH is not set
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 0c8c7b6ab897..3e430b489bb7 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -16,11 +16,6 @@ config SPU_BASE
 	bool
 	default n
 
-config SPUFS_MMAP
-	bool
-	depends on SPU_FS && SPARSEMEM
-	default y
-
 config CBE_RAS
 	bool "RAS features for bare metal Cell BE"
 	default y
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 6cc59e0b4582..8533f13a5ed1 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -101,7 +101,7 @@ static void iic_ioexc_eoi(unsigned int irq)
 static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc,
 			      struct pt_regs *regs)
 {
-	struct cbe_iic_regs *node_iic = desc->handler_data;
+	struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data;
 	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
 	unsigned long bits, ack;
 	int cascade;
@@ -320,7 +320,7 @@ static int __init setup_iic(void)
 	struct device_node *dn;
 	struct resource r0, r1;
 	unsigned int node, cascade, found = 0;
-	struct cbe_iic_regs *node_iic;
+	struct cbe_iic_regs __iomem *node_iic;
 	const u32 *np;
 
 	for (dn = NULL;
@@ -357,7 +357,11 @@ static int __init setup_iic(void)
 		cascade = irq_create_mapping(iic_host, cascade);
 		if (cascade == NO_IRQ)
 			continue;
-		set_irq_data(cascade, node_iic);
+		/*
+		 * irq_data is a generic pointer that gets passed back
+		 * to us later, so the forced cast is fine.
+		 */
+		set_irq_data(cascade, (void __force *)node_iic);
 		set_irq_chained_handler(cascade , iic_ioexc_cascade);
 		out_be64(&node_iic->iic_ir,
 			 (1 << 12) /* priority */ |
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index d2b20eba5b87..aca4c3db0dde 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -345,8 +345,8 @@ static int cell_map_iommu_hardcoded(int num_nodes)
 
 	/* node 0 */
 	iommu = &cell_iommus[0];
-	iommu->mapped_base = ioremap(0x20000511000, 0x1000);
-	iommu->mapped_mmio_base = ioremap(0x20000510000, 0x1000);
+	iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
+	iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);
 
 	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
 
@@ -358,8 +358,8 @@ static int cell_map_iommu_hardcoded(int num_nodes)
 
 	/* node 1 */
 	iommu = &cell_iommus[1];
-	iommu->mapped_base = ioremap(0x30000511000, 0x1000);
-	iommu->mapped_mmio_base = ioremap(0x30000510000, 0x1000);
+	iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
+	iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);
 
 	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
 
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 608b1ebc56b2..b0e95d594c51 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -244,7 +244,6 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
 	int imaplen, intsize, unit;
 	struct device_node *iic;
 
-#if 0 /* Enable that when we have a way to retreive the node as well */
 	/* First, we check wether we have a real "interrupts" in the device
 	 * tree in case the device-tree is ever fixed
 	 */
@@ -252,9 +251,8 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
 	if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) {
 		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
 					     oirq.size);
-		goto bail;
+		return virq;
 	}
-#endif
 
 	/* Now do the horrible hacks */
 	tmp = get_property(pic->of_node, "#interrupt-cells", NULL);
@@ -369,7 +367,7 @@ void __init spider_init_IRQ(void)
 	} else if (device_is_compatible(dn, "sti,platform-spider-pic")
 		   && (chip < 2)) {
 		static long hard_coded_pics[] =
-			{ 0x24000008000, 0x34000008000 };
+			{ 0x24000008000ul, 0x34000008000ul};
 		r.start = hard_coded_pics[chip];
 	} else
 		continue;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index f78680346e5f..ac5f12662dbb 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,11 +25,13 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <linux/poll.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
 
+#include <asm/firmware.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <linux/mutex.h>
@@ -46,21 +48,21 @@ EXPORT_SYMBOL_GPL(spu_priv1_ops);
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
 	pr_debug("%s\n", __FUNCTION__);
-	force_sig(SIGBUS, /* info, */ current);
+	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
 	return 0;
 }
 
 static int __spu_trap_dma_align(struct spu *spu)
 {
 	pr_debug("%s\n", __FUNCTION__);
-	force_sig(SIGBUS, /* info, */ current);
+	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
 	return 0;
 }
 
 static int __spu_trap_error(struct spu *spu)
 {
 	pr_debug("%s\n", __FUNCTION__);
-	force_sig(SIGILL, /* info, */ current);
+	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
 	return 0;
 }
 
@@ -317,7 +319,7 @@ static void spu_free_irqs(struct spu *spu)
 		free_irq(spu->irqs[2], spu);
 }
 
-static LIST_HEAD(spu_list);
+static struct list_head spu_list[MAX_NUMNODES];
 static DEFINE_MUTEX(spu_mutex);
 
 static void spu_init_channels(struct spu *spu)
@@ -354,32 +356,42 @@ static void spu_init_channels(struct spu *spu)
 	}
 }
 
-struct spu *spu_alloc(void)
+struct spu *spu_alloc_node(int node)
 {
-	struct spu *spu;
+	struct spu *spu = NULL;
 
 	mutex_lock(&spu_mutex);
-	if (!list_empty(&spu_list)) {
-		spu = list_entry(spu_list.next, struct spu, list);
+	if (!list_empty(&spu_list[node])) {
+		spu = list_entry(spu_list[node].next, struct spu, list);
 		list_del_init(&spu->list);
-		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
-	} else {
-		pr_debug("No SPU left\n");
-		spu = NULL;
+		pr_debug("Got SPU %x %d %d\n",
+			 spu->isrc, spu->number, spu->node);
+		spu_init_channels(spu);
 	}
 	mutex_unlock(&spu_mutex);
 
-	if (spu)
-		spu_init_channels(spu);
+	return spu;
+}
+EXPORT_SYMBOL_GPL(spu_alloc_node);
+
+struct spu *spu_alloc(void)
+{
+	struct spu *spu = NULL;
+	int node;
+
+	for (node = 0; node < MAX_NUMNODES; node++) {
+		spu = spu_alloc_node(node);
+		if (spu)
+			break;
+	}
 
 	return spu;
 }
-EXPORT_SYMBOL_GPL(spu_alloc);
 
 void spu_free(struct spu *spu)
 {
 	mutex_lock(&spu_mutex);
-	list_add_tail(&spu->list, &spu_list);
+	list_add_tail(&spu->list, &spu_list[spu->node]);
 	mutex_unlock(&spu_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_free);
@@ -566,7 +578,7 @@ static void spu_unmap(struct spu *spu)
 }
 
 /* This function shall be abstracted for HV platforms */
-static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
 {
 	unsigned int isrc;
 	const u32 *tmp;
@@ -590,7 +602,7 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
 	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
 }
 
-static int __init spu_map_device(struct spu *spu, struct device_node *node)
+static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
 {
 	const char *prop;
 	int ret;
@@ -635,6 +647,88 @@ out:
 	return ret;
 }
 
+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+	struct of_irq oirq;
+	int ret;
+	int i;
+
+	for (i=0; i < 3; i++) {
+		ret = of_irq_map_one(np, i, &oirq);
+		if (ret)
+			goto err;
+
+		ret = -EINVAL;
+		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
+					oirq.specifier, oirq.size);
+		if (spu->irqs[i] == NO_IRQ)
+			goto err;
+	}
+	return 0;
+
+err:
+	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
+	for (; i >= 0; i--) {
+		if (spu->irqs[i] != NO_IRQ)
+			irq_dispose_mapping(spu->irqs[i]);
+	}
+	return ret;
+}
+
+static int spu_map_resource(struct device_node *node, int nr,
+		void __iomem** virt, unsigned long *phys)
+{
+	struct resource resource = { };
+	int ret;
+
+	ret = of_address_to_resource(node, 0, &resource);
+	if (ret)
+		goto out;
+
+	if (phys)
+		*phys = resource.start;
+	*virt = ioremap(resource.start, resource.end - resource.start);
+	if (!*virt)
+		ret = -EINVAL;
+
+out:
+	return ret;
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *node)
+{
+	int ret = -ENODEV;
+	spu->name = get_property(node, "name", NULL);
+	if (!spu->name)
+		goto out;
+
+	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
+					&spu->local_store_phys);
+	if (ret)
+		goto out;
+	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
+					&spu->problem_phys);
+	if (ret)
+		goto out_unmap;
+	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
+					NULL);
+	if (ret)
+		goto out_unmap;
+
+	if (!firmware_has_feature(FW_FEATURE_LPAR))
+		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
+					NULL);
+	if (ret)
+		goto out_unmap;
+	return 0;
+
+out_unmap:
+	spu_unmap(spu);
+out:
+	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
+	return ret;
+}
+
 struct sysdev_class spu_sysdev_class = {
 	set_kset_name("spu")
 };
@@ -688,6 +782,9 @@ static int __init create_spu(struct device_node *spe)
 		goto out;
 
 	ret = spu_map_device(spu, spe);
+	/* try old method */
+	if (ret)
+		ret = spu_map_device_old(spu, spe);
 	if (ret)
 		goto out_free;
 
@@ -697,6 +794,8 @@ static int __init create_spu(struct device_node *spe)
 	spu->nid = 0;
 	ret = spu_map_interrupts(spu, spe);
 	if (ret)
+		ret = spu_map_interrupts_old(spu, spe);
+	if (ret)
 		goto out_unmap;
 	spin_lock_init(&spu->register_lock);
 	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
@@ -706,13 +805,13 @@ static int __init create_spu(struct device_node *spe)
 	spu->number = number++;
 	ret = spu_request_irqs(spu);
 	if (ret)
-		goto out_unmap;
+		goto out_unlock;
 
 	ret = spu_create_sysdev(spu);
 	if (ret)
 		goto out_free_irqs;
 
-	list_add(&spu->list, &spu_list);
+	list_add(&spu->list, &spu_list[spu->node]);
 	mutex_unlock(&spu_mutex);
 
 	pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
@@ -722,9 +821,9 @@ static int __init create_spu(struct device_node *spe)
 
 out_free_irqs:
 	spu_free_irqs(spu);
-
-out_unmap:
+out_unlock:
 	mutex_unlock(&spu_mutex);
+out_unmap:
 	spu_unmap(spu);
 out_free:
 	kfree(spu);
@@ -745,9 +844,13 @@ static void destroy_spu(struct spu *spu)
 static void cleanup_spu_base(void)
 {
 	struct spu *spu, *tmp;
+	int node;
+
 	mutex_lock(&spu_mutex);
-	list_for_each_entry_safe(spu, tmp, &spu_list, list)
-		destroy_spu(spu);
+	for (node = 0; node < MAX_NUMNODES; node++) {
+		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
+			destroy_spu(spu);
+	}
 	mutex_unlock(&spu_mutex);
 	sysdev_class_unregister(&spu_sysdev_class);
 }
@@ -756,13 +859,16 @@ module_exit(cleanup_spu_base);
 static int __init init_spu_base(void)
 {
 	struct device_node *node;
-	int ret;
+	int i, ret;
 
 	/* create sysdev class for spus */
 	ret = sysdev_class_register(&spu_sysdev_class);
 	if (ret)
 		return ret;
 
+	for (i = 0; i < MAX_NUMNODES; i++)
+		INIT_LIST_HEAD(&spu_list[i]);
+
 	ret = -ENODEV;
 	for (node = of_find_node_by_type(NULL, "spe");
 			node; node = of_find_node_by_type(node, "spe")) {
@@ -774,18 +880,6 @@ static int __init init_spu_base(void)
 			break;
 		}
 	}
-	/* in some old firmware versions, the spe is called 'spc', so we
-	   look for that as well */
-	for (node = of_find_node_by_type(NULL, "spc");
-			node; node = of_find_node_by_type(node, "spc")) {
-		ret = create_spu(node);
-		if (ret) {
-			printk(KERN_WARNING "%s: Error initializing %s\n",
-				__FUNCTION__, node->name);
-			cleanup_spu_base();
-			break;
-		}
-	}
 	return ret;
 }
 module_init(init_spu_base);
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index bb5dc634272c..ecdfbb35f82e 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -2,7 +2,7 @@ obj-y += switch.o
 
 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o syscalls.o
-spufs-y += sched.o backing_ops.o hw_ops.o run.o
+spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 
 # Rules to build switch.o with the help of SPU tool chain
 SPU_CROSS	:= spu-
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 36439c5e9f2d..034cf6af53a2 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -27,7 +27,7 @@
 #include <asm/spu_csa.h>
 #include "spufs.h"
 
-struct spu_context *alloc_spu_context(void)
+struct spu_context *alloc_spu_context(struct spu_gang *gang)
 {
 	struct spu_context *ctx;
 	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
@@ -51,6 +51,8 @@ struct spu_context *alloc_spu_context(void)
 	ctx->state = SPU_STATE_SAVED;
 	ctx->ops = &spu_backing_ops;
 	ctx->owner = get_task_mm(current);
+	if (gang)
+		spu_gang_add_ctx(gang, ctx);
 	goto out;
 out_free:
 	kfree(ctx);
@@ -67,6 +69,8 @@ void destroy_spu_context(struct kref *kref)
 	spu_deactivate(ctx);
 	up_write(&ctx->state_sema);
 	spu_fini_csa(&ctx->csa);
+	if (ctx->gang)
+		spu_gang_remove_ctx(ctx->gang, ctx);
 	kfree(ctx);
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 51fd197ab5dd..e0d730045260 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -36,6 +36,8 @@
 
 #include "spufs.h"
 
+#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
+
 
 static int
 spufs_mem_open(struct inode *inode, struct file *file)
@@ -88,7 +90,6 @@ spufs_mem_write(struct file *file, const char __user *buffer,
 	return ret;
 }
 
-#ifdef CONFIG_SPUFS_MMAP
 static struct page *
 spufs_mem_mmap_nopage(struct vm_area_struct *vma,
 		      unsigned long address, int *type)
@@ -101,12 +102,16 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
 
 	spu_acquire(ctx);
 
-	if (ctx->state == SPU_STATE_SAVED)
+	if (ctx->state == SPU_STATE_SAVED) {
+		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
 		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
-	else
+	} else {
+		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+					| _PAGE_NO_CACHE | _PAGE_GUARDED);
 		page = pfn_to_page((ctx->spu->local_store_phys + offset)
 				   >> PAGE_SHIFT);
-
+	}
 	spu_release(ctx);
 
 	if (type)
@@ -133,22 +138,19 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &spufs_mem_mmap_vmops;
 	return 0;
 }
-#endif
 
 static struct file_operations spufs_mem_fops = {
 	.open = spufs_mem_open,
 	.read = spufs_mem_read,
 	.write = spufs_mem_write,
 	.llseek = generic_file_llseek,
-#ifdef CONFIG_SPUFS_MMAP
 	.mmap = spufs_mem_mmap,
-#endif
 };
 
-#ifdef CONFIG_SPUFS_MMAP
 static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
 				    unsigned long address,
-				    int *type, unsigned long ps_offs)
+				    int *type, unsigned long ps_offs,
+				    unsigned long ps_size)
 {
 	struct page *page = NOPAGE_SIGBUS;
 	int fault_type = VM_FAULT_SIGBUS;
@@ -158,7 +160,7 @@ static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
 	int ret;
 
 	offset += vma->vm_pgoff << PAGE_SHIFT;
-	if (offset >= 0x4000)
+	if (offset >= ps_size)
 		goto out;
 
 	ret = spu_acquire_runnable(ctx);
@@ -179,10 +181,11 @@ static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
 	return page;
 }
 
+#if SPUFS_MMAP_4K
 static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
 					   unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x4000);
+	return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);
 }
 
 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
@@ -191,17 +194,12 @@ static struct vm_operations_struct spufs_cntl_mmap_vmops = {
 
 /*
  * mmap support for problem state control area [0x4000 - 0x4fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
  */
 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
 	vma->vm_flags |= VM_RESERVED;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -209,42 +207,48 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &spufs_cntl_mmap_vmops;
 	return 0;
 }
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_cntl_mmap NULL
+#endif /* !SPUFS_MMAP_4K */
 
-static int spufs_cntl_open(struct inode *inode, struct file *file)
+static u64 spufs_cntl_get(void *data)
 {
-	struct spufs_inode_info *i = SPUFS_I(inode);
-	struct spu_context *ctx = i->i_ctx;
+	struct spu_context *ctx = data;
+	u64 val;
 
-	file->private_data = ctx;
-	file->f_mapping = inode->i_mapping;
-	ctx->cntl = inode->i_mapping;
-	return 0;
+	spu_acquire(ctx);
+	val = ctx->ops->status_read(ctx);
+	spu_release(ctx);
+
+	return val;
 }
 
-static ssize_t
-spufs_cntl_read(struct file *file, char __user *buffer,
-		size_t size, loff_t *pos)
+static void spufs_cntl_set(void *data, u64 val)
 {
-	/* FIXME: read from spu status */
-	return -EINVAL;
+	struct spu_context *ctx = data;
+
+	spu_acquire(ctx);
+	ctx->ops->runcntl_write(ctx, val);
+	spu_release(ctx);
 }
 
-static ssize_t
-spufs_cntl_write(struct file *file, const char __user *buffer,
-		 size_t size, loff_t *pos)
+static int spufs_cntl_open(struct inode *inode, struct file *file)
 {
-	/* FIXME: write to runctl bit */
-	return -EINVAL;
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	file->private_data = ctx;
+	file->f_mapping = inode->i_mapping;
+	ctx->cntl = inode->i_mapping;
+	return simple_attr_open(inode, file, spufs_cntl_get,
+				spufs_cntl_set, "0x%08lx");
 }
 
 static struct file_operations spufs_cntl_fops = {
 	.open = spufs_cntl_open,
-	.read = spufs_cntl_read,
-	.write = spufs_cntl_write,
-#ifdef CONFIG_SPUFS_MMAP
+	.read = simple_attr_read,
+	.write = simple_attr_write,
 	.mmap = spufs_cntl_mmap,
-#endif
 };
 
 static int
@@ -356,27 +360,54 @@ static int spufs_pipe_open(struct inode *inode, struct file *file)
 	return nonseekable_open(inode, file);
 }
 
+/*
+ * Read as many bytes from the mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ */
 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 mbox_data;
-	int ret;
+	u32 mbox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	udata = (void __user *)buf;
+
 	spu_acquire(ctx);
-	ret = ctx->ops->mbox_read(ctx, &mbox_data);
+	for (count = 0; count <= len; count += 4, udata++) {
+		int ret;
+		ret = ctx->ops->mbox_read(ctx, &mbox_data);
+		if (ret == 0)
+			break;
+
+		/*
+		 * at the end of the mapped area, we can fault
+		 * but still need to return the data we have
+		 * read successfully so far.
+		 */
+		ret = __put_user(mbox_data, udata);
+		if (ret) {
+			if (!count)
+				count = -EFAULT;
+			break;
+		}
+	}
 	spu_release(ctx);
 
-	if (!ret)
-		return -EAGAIN;
-
-	if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
-		return -EFAULT;
+	if (!count)
+		count = -EAGAIN;
 
-	return 4;
+	return count;
 }
 
 static struct file_operations spufs_mbox_fops = {
@@ -432,36 +463,70 @@ void spufs_ibox_callback(struct spu *spu)
 	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
 }
 
+/*
+ * Read as many bytes from the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * any data is available, but return when we have been able to
+ * read something.
+ */
 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 ibox_data;
-	ssize_t ret;
+	u32 ibox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	udata = (void __user *)buf;
+
 	spu_acquire(ctx);
 
-	ret = 0;
+	/* wait only for the first element */
+	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
 		if (!spu_ibox_read(ctx, &ibox_data))
-			ret = -EAGAIN;
+			count = -EAGAIN;
 	} else {
-		ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
 	}
+	if (count)
+		goto out;
 
-	spu_release(ctx);
+	/* if we can't write at all, return -EFAULT */
+	count = __put_user(ibox_data, udata);
+	if (count)
+		goto out;
 
-	if (ret)
-		return ret;
+	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = ctx->ops->ibox_read(ctx, &ibox_data);
+		if (ret == 0)
+			break;
+		/*
+		 * at the end of the mapped area, we can fault
+		 * but still need to return the data we have
+		 * read successfully so far.
+		 */
+		ret = __put_user(ibox_data, udata);
+		if (ret)
+			break;
+	}
 
-	ret = 4;
-	if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
-		ret = -EFAULT;
+out:
+	spu_release(ctx);
 
-	return ret;
+	return count;
 }
 
 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
@@ -534,32 +599,67 @@ void spufs_wbox_callback(struct spu *spu)
 	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
 }
 
+/*
+ * Write as many bytes to the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - the mailbox is full
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * space is available, but return when we have been able to
+ * write something.
+ */
 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 			size_t len, loff_t *pos)
 {
 	struct spu_context *ctx = file->private_data;
-	u32 wbox_data;
-	int ret;
+	u32 wbox_data, __user *udata;
+	ssize_t count;
 
 	if (len < 4)
 		return -EINVAL;
 
-	if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
+	udata = (void __user *)buf;
+	if (!access_ok(VERIFY_READ, buf, len))
+		return -EFAULT;
+
+	if (__get_user(wbox_data, udata))
 		return -EFAULT;
 
 	spu_acquire(ctx);
 
-	ret = 0;
+	/*
+	 * make sure we can at least write one element, by waiting
+	 * in case of !O_NONBLOCK
+	 */
+	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
 		if (!spu_wbox_write(ctx, wbox_data))
-			ret = -EAGAIN;
+			count = -EAGAIN;
 	} else {
-		ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
 	}
 
-	spu_release(ctx);
+	if (count)
+		goto out;
+
+	/* write as much as possible */
+	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = __get_user(wbox_data, udata);
+		if (ret)
+			break;
+
+		ret = spu_wbox_write(ctx, wbox_data);
+		if (ret == 0)
+			break;
+	}
 
-	return ret ? ret : sizeof wbox_data;
+out:
+	spu_release(ctx);
+	return count;
 }
 
 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
@@ -657,11 +757,19 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
 	return 4;
 }
 
-#ifdef CONFIG_SPUFS_MMAP
 static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
 					  unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x14000);
+#if PAGE_SIZE == 0x1000
+	return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
+#elif PAGE_SIZE == 0x10000
+	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
+	 * signal 1 and 2 area
+	 */
+	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
+#else
+#error unsupported page size
+#endif
 }
 
 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
@@ -680,15 +788,12 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &spufs_signal1_mmap_vmops;
 	return 0;
 }
-#endif
 
 static struct file_operations spufs_signal1_fops = {
 	.open = spufs_signal1_open,
 	.read = spufs_signal1_read,
 	.write = spufs_signal1_write,
-#ifdef CONFIG_SPUFS_MMAP
 	.mmap = spufs_signal1_mmap,
-#endif
 };
 
 static int spufs_signal2_open(struct inode *inode, struct file *file)
@@ -743,11 +848,20 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
 	return 4;
 }
 
-#ifdef CONFIG_SPUFS_MMAP
+#if SPUFS_MMAP_4K
 static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
 					  unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x1c000);
+#if PAGE_SIZE == 0x1000
+	return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
+#elif PAGE_SIZE == 0x10000
+	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
+	 * signal 1 and 2 area
+	 */
+	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
+#else
+#error unsupported page size
+#endif
 }
 
 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
@@ -767,15 +881,15 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &spufs_signal2_mmap_vmops;
 	return 0;
 }
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_signal2_mmap NULL
+#endif /* !SPUFS_MMAP_4K */
 
 static struct file_operations spufs_signal2_fops = {
 	.open = spufs_signal2_open,
 	.read = spufs_signal2_read,
 	.write = spufs_signal2_write,
-#ifdef CONFIG_SPUFS_MMAP
 	.mmap = spufs_signal2_mmap,
-#endif
 };
 
 static void spufs_signal1_type_set(void *data, u64 val)
@@ -824,11 +938,11 @@ static u64 spufs_signal2_type_get(void *data)
 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
 		       spufs_signal2_type_set, "%llu");
 
-#ifdef CONFIG_SPUFS_MMAP
+#if SPUFS_MMAP_4K
 static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
 					   unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x0000);
+	return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);
 }
 
 static struct vm_operations_struct spufs_mss_mmap_vmops = {
@@ -837,17 +951,12 @@ static struct vm_operations_struct spufs_mss_mmap_vmops = {
 
 /*
  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
  */
 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
 	vma->vm_flags |= VM_RESERVED;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -855,7 +964,9 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &spufs_mss_mmap_vmops;
 	return 0;
 }
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_mss_mmap NULL
+#endif /* !SPUFS_MMAP_4K */
 
 static int spufs_mss_open(struct inode *inode, struct file *file)
 {
@@ -867,17 +978,54 @@ static int spufs_mss_open(struct inode *inode, struct file *file)
 
 static struct file_operations spufs_mss_fops = {
 	.open = spufs_mss_open,
-#ifdef CONFIG_SPUFS_MMAP
 	.mmap = spufs_mss_mmap,
-#endif
+};
+
+static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
+					   unsigned long address, int *type)
+{
+	return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);
+}
+
+static struct vm_operations_struct spufs_psmap_mmap_vmops = {
+	.nopage = spufs_psmap_mmap_nopage,
+};
+
+/*
+ * mmap support for full problem state area [0x00000 - 0x1ffff].
+ */
+static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	if (!(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
+
+	vma->vm_flags |= VM_RESERVED;
+	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+
+	vma->vm_ops = &spufs_psmap_mmap_vmops;
+	return 0;
+}
+
+static int spufs_psmap_open(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+
+	file->private_data = i->i_ctx;
+	return nonseekable_open(inode, file);
+}
+
+static struct file_operations spufs_psmap_fops = {
+	.open = spufs_psmap_open,
+	.mmap = spufs_psmap_mmap,
 };
 
 
-#ifdef CONFIG_SPUFS_MMAP
+#if SPUFS_MMAP_4K
 static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
 					  unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x3000);
+	return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);
 }
 
 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
@@ -886,17 +1034,12 @@ static struct vm_operations_struct spufs_mfc_mmap_vmops = {
 
 /*
  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
  */
 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
 	vma->vm_flags |= VM_RESERVED;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -904,7 +1047,9 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
904 vma->vm_ops = &spufs_mfc_mmap_vmops; 1047 vma->vm_ops = &spufs_mfc_mmap_vmops;
905 return 0; 1048 return 0;
906} 1049}
907#endif 1050#else /* SPUFS_MMAP_4K */
1051#define spufs_mfc_mmap NULL
1052#endif /* !SPUFS_MMAP_4K */
908 1053
909static int spufs_mfc_open(struct inode *inode, struct file *file) 1054static int spufs_mfc_open(struct inode *inode, struct file *file)
910{ 1055{
@@ -1194,9 +1339,7 @@ static struct file_operations spufs_mfc_fops = {
1194 .flush = spufs_mfc_flush, 1339 .flush = spufs_mfc_flush,
1195 .fsync = spufs_mfc_fsync, 1340 .fsync = spufs_mfc_fsync,
1196 .fasync = spufs_mfc_fasync, 1341 .fasync = spufs_mfc_fasync,
1197#ifdef CONFIG_SPUFS_MMAP
1198 .mmap = spufs_mfc_mmap, 1342 .mmap = spufs_mfc_mmap,
1199#endif
1200}; 1343};
1201 1344
1202static void spufs_npc_set(void *data, u64 val) 1345static void spufs_npc_set(void *data, u64 val)
@@ -1344,6 +1487,21 @@ static u64 spufs_id_get(void *data)
1344} 1487}
1345DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n") 1488DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1346 1489
1490static u64 spufs_object_id_get(void *data)
1491{
1492 struct spu_context *ctx = data;
1493 return ctx->object_id;
1494}
1495
1496static void spufs_object_id_set(void *data, u64 id)
1497{
1498 struct spu_context *ctx = data;
1499 ctx->object_id = id;
1500}
1501
1502DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1503 spufs_object_id_set, "0x%llx\n");
1504
1347struct tree_descr spufs_dir_contents[] = { 1505struct tree_descr spufs_dir_contents[] = {
1348 { "mem", &spufs_mem_fops, 0666, }, 1506 { "mem", &spufs_mem_fops, 0666, },
1349 { "regs", &spufs_regs_fops, 0666, }, 1507 { "regs", &spufs_regs_fops, 0666, },
@@ -1367,6 +1525,8 @@ struct tree_descr spufs_dir_contents[] = {
1367 { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, }, 1525 { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
1368 { "event_mask", &spufs_event_mask_ops, 0666, }, 1526 { "event_mask", &spufs_event_mask_ops, 0666, },
1369 { "srr0", &spufs_srr0_ops, 0666, }, 1527 { "srr0", &spufs_srr0_ops, 0666, },
1528 { "psmap", &spufs_psmap_fops, 0666, },
1370 { "phys-id", &spufs_id_ops, 0666, }, 1529 { "phys-id", &spufs_id_ops, 0666, },
1530 { "object-id", &spufs_object_id_ops, 0666, },
1371 {}, 1531 {},
1372}; 1532};
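
The new psmap file exposes the entire problem state area through one mapping, where the older files (cntl, mbox, signal1/2, mss, mfc) each map a single 4k slice. A minimal user-space sketch, assuming a context directory already created by spu_create() at the hypothetical path /spu/myctx; spufs_psmap_mmap rejects anything that is not MAP_SHARED:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/spu/myctx/psmap", O_RDWR);
	if (fd < 0) {
		perror("open psmap");
		return 1;
	}
	/* map the full problem state area [0x00000 - 0x1ffff] */
	void *ps = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	if (ps == MAP_FAILED) {
		perror("mmap psmap");
		return 1;
	}
	printf("problem state mapped at %p\n", ps);
	munmap(ps, 0x20000);
	close(fd);
	return 0;
}
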
diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
new file mode 100644
index 000000000000..212ea78f9051
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/gang.c
@@ -0,0 +1,81 @@
1/*
2 * SPU file system
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/list.h>
24#include <linux/slab.h>
25
26#include "spufs.h"
27
28struct spu_gang *alloc_spu_gang(void)
29{
30 struct spu_gang *gang;
31
32 gang = kzalloc(sizeof *gang, GFP_KERNEL);
33 if (!gang)
34 goto out;
35
36 kref_init(&gang->kref);
37 mutex_init(&gang->mutex);
38 INIT_LIST_HEAD(&gang->list);
39
40out:
41 return gang;
42}
43
44static void destroy_spu_gang(struct kref *kref)
45{
46 struct spu_gang *gang;
47 gang = container_of(kref, struct spu_gang, kref);
48 WARN_ON(gang->contexts || !list_empty(&gang->list));
49 kfree(gang);
50}
51
52struct spu_gang *get_spu_gang(struct spu_gang *gang)
53{
54 kref_get(&gang->kref);
55 return gang;
56}
57
58int put_spu_gang(struct spu_gang *gang)
59{
60 return kref_put(&gang->kref, &destroy_spu_gang);
61}
62
63void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx)
64{
65 mutex_lock(&gang->mutex);
66 ctx->gang = get_spu_gang(gang);
67 list_add(&ctx->gang_list, &gang->list);
68 gang->contexts++;
69 mutex_unlock(&gang->mutex);
70}
71
72void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx)
73{
74 mutex_lock(&gang->mutex);
75 WARN_ON(ctx->gang != gang);
76 list_del_init(&ctx->gang_list);
77 gang->contexts--;
78 mutex_unlock(&gang->mutex);
79
80 put_spu_gang(gang);
81}
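
The gang object is pure refcounting glue: the gang directory's inode holds the reference returned by alloc_spu_gang(), each context added with spu_gang_add_ctx() takes another through get_spu_gang(), and destroy_spu_gang() warns if a gang dies with contexts still listed. A hedged sketch of the expected pairing from a context's perspective; the real call sites live in context.c, outside this diff, and these helper names are illustrative:

#include "spufs.h"

/* creation path: a context born inside a gang joins it */
static void ctx_join_gang(struct spu_context *ctx, struct spu_gang *gang)
{
	if (gang)
		spu_gang_add_ctx(gang, ctx);	/* grabs a gang reference */
}

/* teardown path: leaving drops that reference; the final
 * put_spu_gang() frees the gang via destroy_spu_gang() */
static void ctx_leave_gang(struct spu_context *ctx)
{
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
}
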
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 3950ddccb2c8..427d00a4f6a0 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -50,6 +50,10 @@ spufs_alloc_inode(struct super_block *sb)
50 ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL); 50 ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
51 if (!ei) 51 if (!ei)
52 return NULL; 52 return NULL;
53
54 ei->i_gang = NULL;
55 ei->i_ctx = NULL;
56
53 return &ei->vfs_inode; 57 return &ei->vfs_inode;
54} 58}
55 59
@@ -128,14 +132,19 @@ out:
128static void 132static void
129spufs_delete_inode(struct inode *inode) 133spufs_delete_inode(struct inode *inode)
130{ 134{
131 if (SPUFS_I(inode)->i_ctx) 135 struct spufs_inode_info *ei = SPUFS_I(inode);
132 put_spu_context(SPUFS_I(inode)->i_ctx); 136
137 if (ei->i_ctx)
138 put_spu_context(ei->i_ctx);
139 if (ei->i_gang)
140 put_spu_gang(ei->i_gang);
133 clear_inode(inode); 141 clear_inode(inode);
134} 142}
135 143
136static void spufs_prune_dir(struct dentry *dir) 144static void spufs_prune_dir(struct dentry *dir)
137{ 145{
138 struct dentry *dentry, *tmp; 146 struct dentry *dentry, *tmp;
147
139 mutex_lock(&dir->d_inode->i_mutex); 148 mutex_lock(&dir->d_inode->i_mutex);
140 list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { 149 list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
141 spin_lock(&dcache_lock); 150 spin_lock(&dcache_lock);
@@ -156,13 +165,13 @@ static void spufs_prune_dir(struct dentry *dir)
156 mutex_unlock(&dir->d_inode->i_mutex); 165 mutex_unlock(&dir->d_inode->i_mutex);
157} 166}
158 167
159/* Caller must hold root->i_mutex */ 168/* Caller must hold parent->i_mutex */
160static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry) 169static int spufs_rmdir(struct inode *parent, struct dentry *dir)
161{ 170{
162 /* remove all entries */ 171 /* remove all entries */
163 spufs_prune_dir(dir_dentry); 172 spufs_prune_dir(dir);
164 173
165 return simple_rmdir(root, dir_dentry); 174 return simple_rmdir(parent, dir);
166} 175}
167 176
168static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files, 177static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
@@ -191,17 +200,17 @@ out:
191static int spufs_dir_close(struct inode *inode, struct file *file) 200static int spufs_dir_close(struct inode *inode, struct file *file)
192{ 201{
193 struct spu_context *ctx; 202 struct spu_context *ctx;
194 struct inode *dir; 203 struct inode *parent;
195 struct dentry *dentry; 204 struct dentry *dir;
196 int ret; 205 int ret;
197 206
198 dentry = file->f_dentry; 207 dir = file->f_dentry;
199 dir = dentry->d_parent->d_inode; 208 parent = dir->d_parent->d_inode;
200 ctx = SPUFS_I(dentry->d_inode)->i_ctx; 209 ctx = SPUFS_I(dir->d_inode)->i_ctx;
201 210
202 mutex_lock(&dir->i_mutex); 211 mutex_lock(&parent->i_mutex);
203 ret = spufs_rmdir(dir, dentry); 212 ret = spufs_rmdir(parent, dir);
204 mutex_unlock(&dir->i_mutex); 213 mutex_unlock(&parent->i_mutex);
205 WARN_ON(ret); 214 WARN_ON(ret);
206 215
207 /* We have to give up the mm_struct */ 216 /* We have to give up the mm_struct */
@@ -224,7 +233,8 @@ struct file_operations spufs_context_fops = {
224}; 233};
225 234
226static int 235static int
227spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode) 236spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
237 int mode)
228{ 238{
229 int ret; 239 int ret;
230 struct inode *inode; 240 struct inode *inode;
@@ -239,11 +249,13 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
239 inode->i_gid = dir->i_gid; 249 inode->i_gid = dir->i_gid;
240 inode->i_mode &= S_ISGID; 250 inode->i_mode &= S_ISGID;
241 } 251 }
242 ctx = alloc_spu_context(); 252 ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
243 SPUFS_I(inode)->i_ctx = ctx; 253 SPUFS_I(inode)->i_ctx = ctx;
244 if (!ctx) 254 if (!ctx)
245 goto out_iput; 255 goto out_iput;
246 256
257 ctx->flags = flags;
258
247 inode->i_op = &spufs_dir_inode_operations; 259 inode->i_op = &spufs_dir_inode_operations;
248 inode->i_fop = &simple_dir_operations; 260 inode->i_fop = &simple_dir_operations;
249 ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx); 261 ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
@@ -289,24 +301,177 @@ out:
289 return ret; 301 return ret;
290} 302}
291 303
304static int spufs_create_context(struct inode *inode,
305 struct dentry *dentry,
306 struct vfsmount *mnt, int flags, int mode)
307{
308 int ret;
309
310 ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
311 if (ret)
312 goto out_unlock;
313
314 /*
315 * get references for dget and mntget, will be released
316 * in error path of *_open().
317 */
318 ret = spufs_context_open(dget(dentry), mntget(mnt));
319 if (ret < 0) {
320 WARN_ON(spufs_rmdir(inode, dentry));
321 mutex_unlock(&inode->i_mutex);
322 spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
323 goto out;
324 }
325
326out_unlock:
327 mutex_unlock(&inode->i_mutex);
328out:
329 dput(dentry);
330 return ret;
331}
332
333static int spufs_rmgang(struct inode *root, struct dentry *dir)
334{
335 /* FIXME: this fails if the dir is not empty,
336 which causes a leak of gangs. */
337 return simple_rmdir(root, dir);
338}
339
340static int spufs_gang_close(struct inode *inode, struct file *file)
341{
342 struct inode *parent;
343 struct dentry *dir;
344 int ret;
345
346 dir = file->f_dentry;
347 parent = dir->d_parent->d_inode;
348
349 ret = spufs_rmgang(parent, dir);
350 WARN_ON(ret);
351
352 return dcache_dir_close(inode, file);
353}
354
355struct file_operations spufs_gang_fops = {
356 .open = dcache_dir_open,
357 .release = spufs_gang_close,
358 .llseek = dcache_dir_lseek,
359 .read = generic_read_dir,
360 .readdir = dcache_readdir,
361 .fsync = simple_sync_file,
362};
363
364static int
365spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
366{
367 int ret;
368 struct inode *inode;
369 struct spu_gang *gang;
370
371 ret = -ENOSPC;
372 inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
373 if (!inode)
374 goto out;
375
376 ret = 0;
377 if (dir->i_mode & S_ISGID) {
378 inode->i_gid = dir->i_gid;
379 inode->i_mode &= S_ISGID;
380 }
381 gang = alloc_spu_gang();
382 SPUFS_I(inode)->i_ctx = NULL;
383 SPUFS_I(inode)->i_gang = gang;
384 if (!gang)
385 goto out_iput;
386
387 inode->i_op = &spufs_dir_inode_operations;
388 inode->i_fop = &simple_dir_operations;
389
390 d_instantiate(dentry, inode);
391 dget(dentry);
392 dir->i_nlink++;
393 dentry->d_inode->i_nlink++;
394 return ret;
395
396out_iput:
397 iput(inode);
398out:
399 return ret;
400}
401
402static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
403{
404 int ret;
405 struct file *filp;
406
407 ret = get_unused_fd();
408 if (ret < 0) {
409 dput(dentry);
410 mntput(mnt);
411 goto out;
412 }
413
414 filp = dentry_open(dentry, mnt, O_RDONLY);
415 if (IS_ERR(filp)) {
416 put_unused_fd(ret);
417 ret = PTR_ERR(filp);
418 goto out;
419 }
420
421 filp->f_op = &spufs_gang_fops;
422 fd_install(ret, filp);
423out:
424 return ret;
425}
426
427static int spufs_create_gang(struct inode *inode,
428 struct dentry *dentry,
429 struct vfsmount *mnt, int mode)
430{
431 int ret;
432
433 ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
434 if (ret)
435 goto out;
436
437 /*
438 * get references for dget and mntget, will be released
439 * in error path of *_open().
440 */
441 ret = spufs_gang_open(dget(dentry), mntget(mnt));
442 if (ret < 0)
443 WARN_ON(spufs_rmgang(inode, dentry));
444
445out:
446 mutex_unlock(&inode->i_mutex);
447 dput(dentry);
448 return ret;
449}
450
451
292static struct file_system_type spufs_type; 452static struct file_system_type spufs_type;
293 453
294long spufs_create_thread(struct nameidata *nd, 454long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode)
295 unsigned int flags, mode_t mode)
296{ 455{
297 struct dentry *dentry; 456 struct dentry *dentry;
298 int ret; 457 int ret;
299 458
300 /* need to be at the root of spufs */
301 ret = -EINVAL; 459 ret = -EINVAL;
302 if (nd->dentry->d_sb->s_type != &spufs_type || 460 /* check if we are on spufs */
303 nd->dentry != nd->dentry->d_sb->s_root) 461 if (nd->dentry->d_sb->s_type != &spufs_type)
304 goto out; 462 goto out;
305 463
306 /* all flags are reserved */ 464 /* don't accept undefined flags */
307 if (flags) 465 if (flags & (~SPU_CREATE_FLAG_ALL))
308 goto out; 466 goto out;
309 467
468 /* only threads can be underneath a gang */
469 if (nd->dentry != nd->dentry->d_sb->s_root) {
470 if ((flags & SPU_CREATE_GANG) ||
471 !SPUFS_I(nd->dentry->d_inode)->i_gang)
472 goto out;
473 }
474
310 dentry = lookup_create(nd, 1); 475 dentry = lookup_create(nd, 1);
311 ret = PTR_ERR(dentry); 476 ret = PTR_ERR(dentry);
312 if (IS_ERR(dentry)) 477 if (IS_ERR(dentry))
@@ -317,22 +482,13 @@ long spufs_create_thread(struct nameidata *nd,
317 goto out_dput; 482 goto out_dput;
318 483
319 mode &= ~current->fs->umask; 484 mode &= ~current->fs->umask;
320 ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO);
321 if (ret)
322 goto out_dput;
323 485
324 /* 486 if (flags & SPU_CREATE_GANG)
325 * get references for dget and mntget, will be released 487 return spufs_create_gang(nd->dentry->d_inode,
326 * in error path of *_open(). 488 dentry, nd->mnt, mode);
327 */ 489 else
328 ret = spufs_context_open(dget(dentry), mntget(nd->mnt)); 490 return spufs_create_context(nd->dentry->d_inode,
329 if (ret < 0) { 491 dentry, nd->mnt, flags, mode);
330 WARN_ON(spufs_rmdir(nd->dentry->d_inode, dentry));
331 mutex_unlock(&nd->dentry->d_inode->i_mutex);
332 spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
333 dput(dentry);
334 goto out;
335 }
336 492
337out_dput: 493out_dput:
338 dput(dentry); 494 dput(dentry);
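
With the dispatch above, sys_spu_create gains two personalities: SPU_CREATE_GANG makes a gang directory at the spufs root, while a plain call makes a context either at the root or directly underneath a gang (anything deeper, or a gang inside a gang, is rejected). A user-space sketch, assuming spufs is mounted at the hypothetical /spu and that __NR_spu_create is defined for this kernel:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SPU_CREATE_GANG	0x0002	/* from include/asm-powerpc/spu.h */

int main(void)
{
	/* the gang becomes a directory holding its member contexts */
	long gang_fd = syscall(__NR_spu_create, "/spu/mygang",
			       SPU_CREATE_GANG, 0755);
	if (gang_fd < 0) {
		perror("spu_create gang");
		return 1;
	}
	/* contexts may only sit at the root or directly under a gang */
	long ctx_fd = syscall(__NR_spu_create, "/spu/mygang/ctx0", 0, 0755);
	if (ctx_fd < 0) {
		perror("spu_create context");
		return 1;
	}
	printf("gang fd %ld, context fd %ld\n", gang_fd, ctx_fd);
	return 0;
}
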
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 483c8b76232c..63df8cf4ba16 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -14,6 +14,26 @@ void spufs_stop_callback(struct spu *spu)
14 wake_up_all(&ctx->stop_wq); 14 wake_up_all(&ctx->stop_wq);
15} 15}
16 16
17void spufs_dma_callback(struct spu *spu, int type)
18{
19 struct spu_context *ctx = spu->ctx;
20
21 if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
22 ctx->event_return |= type;
23 wake_up_all(&ctx->stop_wq);
24 } else {
25 switch (type) {
26 case SPE_EVENT_DMA_ALIGNMENT:
27 case SPE_EVENT_INVALID_DMA:
28 force_sig(SIGBUS, /* info, */ current);
29 break;
30 case SPE_EVENT_SPE_ERROR:
31 force_sig(SIGILL, /* info */ current);
32 break;
33 }
34 }
35}
36
17static inline int spu_stopped(struct spu_context *ctx, u32 * stat) 37static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
18{ 38{
19 struct spu *spu; 39 struct spu *spu;
@@ -28,8 +48,7 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
28 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0; 48 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
29} 49}
30 50
31static inline int spu_run_init(struct spu_context *ctx, u32 * npc, 51static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
32 u32 * status)
33{ 52{
34 int ret; 53 int ret;
35 54
@@ -72,7 +91,7 @@ static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
72 SPU_STATUS_STOPPED_BY_HALT)) { 91 SPU_STATUS_STOPPED_BY_HALT)) {
73 return *status; 92 return *status;
74 } 93 }
75 if ((ret = spu_run_init(ctx, npc, status)) != 0) 94 if ((ret = spu_run_init(ctx, npc)) != 0)
76 return ret; 95 return ret;
77 return 0; 96 return 0;
78} 97}
@@ -177,46 +196,49 @@ static inline int spu_process_events(struct spu_context *ctx)
177} 196}
178 197
179long spufs_run_spu(struct file *file, struct spu_context *ctx, 198long spufs_run_spu(struct file *file, struct spu_context *ctx,
180 u32 * npc, u32 * status) 199 u32 *npc, u32 *event)
181{ 200{
182 int ret; 201 int ret;
202 u32 status;
183 203
184 if (down_interruptible(&ctx->run_sema)) 204 if (down_interruptible(&ctx->run_sema))
185 return -ERESTARTSYS; 205 return -ERESTARTSYS;
186 206
187 ret = spu_run_init(ctx, npc, status); 207 ctx->event_return = 0;
208 ret = spu_run_init(ctx, npc);
188 if (ret) 209 if (ret)
189 goto out; 210 goto out;
190 211
191 do { 212 do {
192 ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status)); 213 ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
193 if (unlikely(ret)) 214 if (unlikely(ret))
194 break; 215 break;
195 if ((*status & SPU_STATUS_STOPPED_BY_STOP) && 216 if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
196 (*status >> SPU_STOP_STATUS_SHIFT == 0x2104)) { 217 (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
197 ret = spu_process_callback(ctx); 218 ret = spu_process_callback(ctx);
198 if (ret) 219 if (ret)
199 break; 220 break;
200 *status &= ~SPU_STATUS_STOPPED_BY_STOP; 221 status &= ~SPU_STATUS_STOPPED_BY_STOP;
201 } 222 }
202 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { 223 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
203 ret = spu_reacquire_runnable(ctx, npc, status); 224 ret = spu_reacquire_runnable(ctx, npc, &status);
204 if (ret) 225 if (ret)
205 goto out; 226 goto out;
206 continue; 227 continue;
207 } 228 }
208 ret = spu_process_events(ctx); 229 ret = spu_process_events(ctx);
209 230
210 } while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP | 231 } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
211 SPU_STATUS_STOPPED_BY_HALT))); 232 SPU_STATUS_STOPPED_BY_HALT)));
212 233
213 ctx->ops->runcntl_stop(ctx); 234 ctx->ops->runcntl_stop(ctx);
214 ret = spu_run_fini(ctx, npc, status); 235 ret = spu_run_fini(ctx, npc, &status);
215 if (!ret) 236 if (!ret)
216 ret = *status; 237 ret = status;
217 spu_yield(ctx); 238 spu_yield(ctx);
218 239
219out: 240out:
241 *event = ctx->event_return;
220 up(&ctx->run_sema); 242 up(&ctx->run_sema);
221 return ret; 243 return ret;
222} 244}
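
The run loop now keeps the status word internal and reports accumulated error events through the new event pointer: a context created with SPU_CREATE_EVENTS_ENABLED has DMA and SPE errors folded into ctx->event_return by spufs_dma_callback, while other contexts still receive SIGBUS or SIGILL. A user-space sketch of consuming the event mask; the syscall invocation and entry-point handling are assumptions:

#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* event bits, from include/asm-powerpc/spu.h */
#define SPE_EVENT_DMA_ALIGNMENT	0x0008
#define SPE_EVENT_SPE_ERROR	0x0010
#define SPE_EVENT_INVALID_DMA	0x0800

/* ctx_fd: a context created with SPU_CREATE_EVENTS_ENABLED */
static long run_context(int ctx_fd, uint32_t entry)
{
	uint32_t npc = entry, event = 0;

	/* the SPU status word is the return value; errors arrive
	   in 'event' instead of being delivered as signals */
	long status = syscall(__NR_spu_run, ctx_fd, &npc, &event);
	if (status < 0) {
		perror("spu_run");
		return status;
	}
	if (event & (SPE_EVENT_DMA_ALIGNMENT | SPE_EVENT_INVALID_DMA))
		fprintf(stderr, "DMA error 0x%x at npc 0x%x\n", event, npc);
	else if (event & SPE_EVENT_SPE_ERROR)
		fprintf(stderr, "illegal instruction at npc 0x%x\n", npc);
	return status;
}
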
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1350294484b6..bd6fe4b7a84b 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -3,11 +3,7 @@
3 * Copyright (C) IBM 2005 3 * Copyright (C) IBM 2005
4 * Author: Mark Nutter <mnutter@us.ibm.com> 4 * Author: Mark Nutter <mnutter@us.ibm.com>
5 * 5 *
6 * SPU scheduler, based on Linux thread priority. For now use 6 * 2006-03-31 NUMA domains added.
7 * a simple "cooperative" yield model with no preemption. SPU
8 * scheduling will eventually be preemptive: When a thread with
9 * a higher static priority gets ready to run, then an active SPU
10 * context will be preempted and returned to the waitq.
11 * 7 *
12 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -37,6 +33,9 @@
37#include <linux/smp_lock.h> 33#include <linux/smp_lock.h>
38#include <linux/stddef.h> 34#include <linux/stddef.h>
39#include <linux/unistd.h> 35#include <linux/unistd.h>
36#include <linux/numa.h>
37#include <linux/mutex.h>
38#include <linux/notifier.h>
40 39
41#include <asm/io.h> 40#include <asm/io.h>
42#include <asm/mmu_context.h> 41#include <asm/mmu_context.h>
@@ -49,128 +48,59 @@
49 48
50#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1) 49#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
51struct spu_prio_array { 50struct spu_prio_array {
52 atomic_t nr_blocked;
53 unsigned long bitmap[SPU_BITMAP_SIZE]; 51 unsigned long bitmap[SPU_BITMAP_SIZE];
54 wait_queue_head_t waitq[MAX_PRIO]; 52 wait_queue_head_t waitq[MAX_PRIO];
53 struct list_head active_list[MAX_NUMNODES];
54 struct mutex active_mutex[MAX_NUMNODES];
55}; 55};
56 56
57/* spu_runqueue - This is the main runqueue data structure for SPUs. */ 57static struct spu_prio_array *spu_prio;
58struct spu_runqueue {
59 struct semaphore sem;
60 unsigned long nr_active;
61 unsigned long nr_idle;
62 unsigned long nr_switches;
63 struct list_head active_list;
64 struct list_head idle_list;
65 struct spu_prio_array prio;
66};
67
68static struct spu_runqueue *spu_runqueues = NULL;
69
70static inline struct spu_runqueue *spu_rq(void)
71{
72 /* Future: make this a per-NODE array,
73 * and use cpu_to_node(smp_processor_id())
74 */
75 return spu_runqueues;
76}
77 58
78static inline struct spu *del_idle(struct spu_runqueue *rq) 59static inline int node_allowed(int node)
79{ 60{
80 struct spu *spu; 61 cpumask_t mask;
81 62
82 BUG_ON(rq->nr_idle <= 0); 63 if (!nr_cpus_node(node))
83 BUG_ON(list_empty(&rq->idle_list)); 64 return 0;
84 /* Future: Move SPU out of low-power SRI state. */ 65 mask = node_to_cpumask(node);
85 spu = list_entry(rq->idle_list.next, struct spu, sched_list); 66 if (!cpus_intersects(mask, current->cpus_allowed))
86 list_del_init(&spu->sched_list); 67 return 0;
87 rq->nr_idle--; 68 return 1;
88 return spu;
89} 69}
90 70
91static inline void del_active(struct spu_runqueue *rq, struct spu *spu) 71static inline void mm_needs_global_tlbie(struct mm_struct *mm)
92{ 72{
93 BUG_ON(rq->nr_active <= 0); 73 int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
94 BUG_ON(list_empty(&rq->active_list));
95 list_del_init(&spu->sched_list);
96 rq->nr_active--;
97}
98 74
99static inline void add_idle(struct spu_runqueue *rq, struct spu *spu) 75 /* Global TLBIE broadcast required with SPEs. */
100{ 76 __cpus_setall(&mm->cpu_vm_mask, nr);
101 /* Future: Put SPU into low-power SRI state. */
102 list_add_tail(&spu->sched_list, &rq->idle_list);
103 rq->nr_idle++;
104} 77}
105 78
106static inline void add_active(struct spu_runqueue *rq, struct spu *spu) 79static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
107{
108 rq->nr_active++;
109 rq->nr_switches++;
110 list_add_tail(&spu->sched_list, &rq->active_list);
111}
112 80
113static void prio_wakeup(struct spu_runqueue *rq) 81static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
114{ 82{
115 if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) { 83 blocking_notifier_call_chain(&spu_switch_notifier,
116 int best = sched_find_first_bit(rq->prio.bitmap); 84 ctx ? ctx->object_id : 0, spu);
117 if (best < MAX_PRIO) {
118 wait_queue_head_t *wq = &rq->prio.waitq[best];
119 wake_up_interruptible_nr(wq, 1);
120 }
121 }
122} 85}
123 86
124static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx, 87int spu_switch_event_register(struct notifier_block * n)
125 u64 flags)
126{ 88{
127 int prio = current->prio; 89 return blocking_notifier_chain_register(&spu_switch_notifier, n);
128 wait_queue_head_t *wq = &rq->prio.waitq[prio];
129 DEFINE_WAIT(wait);
130
131 __set_bit(prio, rq->prio.bitmap);
132 atomic_inc(&rq->prio.nr_blocked);
133 prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
134 if (!signal_pending(current)) {
135 up(&rq->sem);
136 up_write(&ctx->state_sema);
137 pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
138 current->pid, current->prio);
139 schedule();
140 down_write(&ctx->state_sema);
141 down(&rq->sem);
142 }
143 finish_wait(wq, &wait);
144 atomic_dec(&rq->prio.nr_blocked);
145 if (!waitqueue_active(wq))
146 __clear_bit(prio, rq->prio.bitmap);
147} 90}
148 91
149static inline int is_best_prio(struct spu_runqueue *rq) 92int spu_switch_event_unregister(struct notifier_block * n)
150{ 93{
151 int best_prio; 94 return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
152
153 best_prio = sched_find_first_bit(rq->prio.bitmap);
154 return (current->prio < best_prio) ? 1 : 0;
155} 95}
156 96
157static inline void mm_needs_global_tlbie(struct mm_struct *mm)
158{
159 /* Global TLBIE broadcast required with SPEs. */
160#if (NR_CPUS > 1)
161 __cpus_setall(&mm->cpu_vm_mask, NR_CPUS);
162#else
163 __cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */
164#endif
165}
166 97
167static inline void bind_context(struct spu *spu, struct spu_context *ctx) 98static inline void bind_context(struct spu *spu, struct spu_context *ctx)
168{ 99{
169 pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid, 100 pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
170 spu->number); 101 spu->number, spu->node);
171 spu->ctx = ctx; 102 spu->ctx = ctx;
172 spu->flags = 0; 103 spu->flags = 0;
173 ctx->flags = 0;
174 ctx->spu = spu; 104 ctx->spu = spu;
175 ctx->ops = &spu_hw_ops; 105 ctx->ops = &spu_hw_ops;
176 spu->pid = current->pid; 106 spu->pid = current->pid;
@@ -181,16 +111,20 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
181 spu->wbox_callback = spufs_wbox_callback; 111 spu->wbox_callback = spufs_wbox_callback;
182 spu->stop_callback = spufs_stop_callback; 112 spu->stop_callback = spufs_stop_callback;
183 spu->mfc_callback = spufs_mfc_callback; 113 spu->mfc_callback = spufs_mfc_callback;
114 spu->dma_callback = spufs_dma_callback;
184 mb(); 115 mb();
185 spu_unmap_mappings(ctx); 116 spu_unmap_mappings(ctx);
186 spu_restore(&ctx->csa, spu); 117 spu_restore(&ctx->csa, spu);
187 spu->timestamp = jiffies; 118 spu->timestamp = jiffies;
119 spu_cpu_affinity_set(spu, raw_smp_processor_id());
120 spu_switch_notify(spu, ctx);
188} 121}
189 122
190static inline void unbind_context(struct spu *spu, struct spu_context *ctx) 123static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
191{ 124{
192 pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__, 125 pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
193 spu->pid, spu->number); 126 spu->pid, spu->number, spu->node);
127 spu_switch_notify(spu, NULL);
194 spu_unmap_mappings(ctx); 128 spu_unmap_mappings(ctx);
195 spu_save(&ctx->csa, spu); 129 spu_save(&ctx->csa, spu);
196 spu->timestamp = jiffies; 130 spu->timestamp = jiffies;
@@ -199,173 +133,158 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
199 spu->wbox_callback = NULL; 133 spu->wbox_callback = NULL;
200 spu->stop_callback = NULL; 134 spu->stop_callback = NULL;
201 spu->mfc_callback = NULL; 135 spu->mfc_callback = NULL;
136 spu->dma_callback = NULL;
202 spu->mm = NULL; 137 spu->mm = NULL;
203 spu->pid = 0; 138 spu->pid = 0;
204 spu->prio = MAX_PRIO; 139 spu->prio = MAX_PRIO;
205 ctx->ops = &spu_backing_ops; 140 ctx->ops = &spu_backing_ops;
206 ctx->spu = NULL; 141 ctx->spu = NULL;
207 ctx->flags = 0;
208 spu->flags = 0; 142 spu->flags = 0;
209 spu->ctx = NULL; 143 spu->ctx = NULL;
210} 144}
211 145
212static void spu_reaper(void *data) 146static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
147 int prio)
213{ 148{
214 struct spu_context *ctx = data; 149 prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
215 struct spu *spu; 150 set_bit(prio, spu_prio->bitmap);
216
217 down_write(&ctx->state_sema);
218 spu = ctx->spu;
219 if (spu && test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
220 if (atomic_read(&spu->rq->prio.nr_blocked)) {
221 pr_debug("%s: spu=%d\n", __func__, spu->number);
222 ctx->ops->runcntl_stop(ctx);
223 spu_deactivate(ctx);
224 wake_up_all(&ctx->stop_wq);
225 } else {
226 clear_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
227 }
228 }
229 up_write(&ctx->state_sema);
230 put_spu_context(ctx);
231} 151}
232 152
233static void schedule_spu_reaper(struct spu_runqueue *rq, struct spu *spu) 153static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
154 int prio)
234{ 155{
235 struct spu_context *ctx = get_spu_context(spu->ctx); 156 u64 flags;
236 unsigned long now = jiffies;
237 unsigned long expire = spu->timestamp + SPU_MIN_TIMESLICE;
238
239 set_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
240 INIT_WORK(&ctx->reap_work, spu_reaper, ctx);
241 if (time_after(now, expire))
242 schedule_work(&ctx->reap_work);
243 else
244 schedule_delayed_work(&ctx->reap_work, expire - now);
245}
246 157
247static void check_preempt_active(struct spu_runqueue *rq) 158 __set_current_state(TASK_RUNNING);
248{ 159
249 struct list_head *p; 160 spin_lock_irqsave(&wq->lock, flags);
250 struct spu *worst = NULL; 161
251 162 remove_wait_queue_locked(wq, wait);
252 list_for_each(p, &rq->active_list) { 163 if (list_empty(&wq->task_list))
253 struct spu *spu = list_entry(p, struct spu, sched_list); 164 clear_bit(prio, spu_prio->bitmap);
254 struct spu_context *ctx = spu->ctx; 165
255 if (!test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) { 166 spin_unlock_irqrestore(&wq->lock, flags);
256 if (!worst || (spu->prio > worst->prio)) {
257 worst = spu;
258 }
259 }
260 }
261 if (worst && (current->prio < worst->prio))
262 schedule_spu_reaper(rq, worst);
263} 167}
264 168
265static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags) 169static void spu_prio_wait(struct spu_context *ctx, u64 flags)
266{ 170{
267 struct spu_runqueue *rq; 171 int prio = current->prio;
268 struct spu *spu = NULL; 172 wait_queue_head_t *wq = &spu_prio->waitq[prio];
173 DEFINE_WAIT(wait);
269 174
270 rq = spu_rq(); 175 if (ctx->spu)
271 down(&rq->sem); 176 return;
272 for (;;) { 177
273 if (rq->nr_idle > 0) { 178 spu_add_wq(wq, &wait, prio);
274 if (is_best_prio(rq)) { 179
275 /* Fall through. */ 180 if (!signal_pending(current)) {
276 spu = del_idle(rq); 181 up_write(&ctx->state_sema);
277 break; 182 pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
278 } else { 183 current->pid, current->prio);
279 prio_wakeup(rq); 184 schedule();
280 up(&rq->sem); 185 down_write(&ctx->state_sema);
281 yield();
282 if (signal_pending(current)) {
283 return NULL;
284 }
285 rq = spu_rq();
286 down(&rq->sem);
287 continue;
288 }
289 } else {
290 check_preempt_active(rq);
291 prio_wait(rq, ctx, flags);
292 if (signal_pending(current)) {
293 prio_wakeup(rq);
294 spu = NULL;
295 break;
296 }
297 continue;
298 }
299 } 186 }
300 up(&rq->sem); 187
301 return spu; 188 spu_del_wq(wq, &wait, prio);
302} 189}
303 190
304static void put_idle_spu(struct spu *spu) 191static void spu_prio_wakeup(void)
305{ 192{
306 struct spu_runqueue *rq = spu->rq; 193 int best = sched_find_first_bit(spu_prio->bitmap);
307 194 if (best < MAX_PRIO) {
308 down(&rq->sem); 195 wait_queue_head_t *wq = &spu_prio->waitq[best];
309 add_idle(rq, spu); 196 wake_up_interruptible_nr(wq, 1);
310 prio_wakeup(rq); 197 }
311 up(&rq->sem);
312} 198}
313 199
314static int get_active_spu(struct spu *spu) 200static int get_active_spu(struct spu *spu)
315{ 201{
316 struct spu_runqueue *rq = spu->rq; 202 int node = spu->node;
317 struct list_head *p;
318 struct spu *tmp; 203 struct spu *tmp;
319 int rc = 0; 204 int rc = 0;
320 205
321 down(&rq->sem); 206 mutex_lock(&spu_prio->active_mutex[node]);
322 list_for_each(p, &rq->active_list) { 207 list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
323 tmp = list_entry(p, struct spu, sched_list);
324 if (tmp == spu) { 208 if (tmp == spu) {
325 del_active(rq, spu); 209 list_del_init(&spu->list);
326 rc = 1; 210 rc = 1;
327 break; 211 break;
328 } 212 }
329 } 213 }
330 up(&rq->sem); 214 mutex_unlock(&spu_prio->active_mutex[node]);
331 return rc; 215 return rc;
332} 216}
333 217
334static void put_active_spu(struct spu *spu) 218static void put_active_spu(struct spu *spu)
335{ 219{
336 struct spu_runqueue *rq = spu->rq; 220 int node = spu->node;
221
222 mutex_lock(&spu_prio->active_mutex[node]);
223 list_add_tail(&spu->list, &spu_prio->active_list[node]);
224 mutex_unlock(&spu_prio->active_mutex[node]);
225}
226
227static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
228{
229 struct spu *spu = NULL;
230 int node = cpu_to_node(raw_smp_processor_id());
231 int n;
337 232
338 down(&rq->sem); 233 for (n = 0; n < MAX_NUMNODES; n++, node++) {
339 add_active(rq, spu); 234 node = (node < MAX_NUMNODES) ? node : 0;
340 up(&rq->sem); 235 if (!node_allowed(node))
236 continue;
237 spu = spu_alloc_node(node);
238 if (spu)
239 break;
240 }
241 return spu;
341} 242}
342 243
343/* Lock order: 244static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
344 * spu_activate() & spu_deactivate() require the 245{
345 * caller to have down_write(&ctx->state_sema). 246 /* Future: spu_get_idle() if possible,
247 * otherwise try to preempt an active
248 * context.
249 */
250 return spu_get_idle(ctx, flags);
251}
252
253/* The three externally callable interfaces
254 * for the scheduler begin here.
346 * 255 *
347 * The rq->sem is breifly held (inside or outside a 256 * spu_activate - bind a context to SPU, waiting as needed.
348 * given ctx lock) for list management, but is never 257 * spu_deactivate - unbind a context from its SPU.
349 * held during save/restore. 258 * spu_yield - yield an SPU if others are waiting.
350 */ 259 */
351 260
352int spu_activate(struct spu_context *ctx, u64 flags) 261int spu_activate(struct spu_context *ctx, u64 flags)
353{ 262{
354 struct spu *spu; 263 struct spu *spu;
264 int ret = 0;
355 265
356 if (ctx->spu) 266 for (;;) {
357 return 0; 267 if (ctx->spu)
358 spu = get_idle_spu(ctx, flags); 268 return 0;
359 if (!spu) 269 spu = spu_get(ctx, flags);
360 return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN; 270 if (spu != NULL) {
361 bind_context(spu, ctx); 271 if (ctx->spu != NULL) {
362 /* 272 spu_free(spu);
363 * We're likely to wait for interrupts on the same 273 spu_prio_wakeup();
364 * CPU that we are now on, so send them here. 274 break;
365 */ 275 }
366 spu_cpu_affinity_set(spu, raw_smp_processor_id()); 276 bind_context(spu, ctx);
367 put_active_spu(spu); 277 put_active_spu(spu);
368 return 0; 278 break;
279 }
280 spu_prio_wait(ctx, flags);
281 if (signal_pending(current)) {
282 ret = -ERESTARTSYS;
283 spu_prio_wakeup();
284 break;
285 }
286 }
287 return ret;
369} 288}
370 289
371void spu_deactivate(struct spu_context *ctx) 290void spu_deactivate(struct spu_context *ctx)
@@ -378,8 +297,10 @@ void spu_deactivate(struct spu_context *ctx)
378 return; 297 return;
379 needs_idle = get_active_spu(spu); 298 needs_idle = get_active_spu(spu);
380 unbind_context(spu, ctx); 299 unbind_context(spu, ctx);
381 if (needs_idle) 300 if (needs_idle) {
382 put_idle_spu(spu); 301 spu_free(spu);
302 spu_prio_wakeup();
303 }
383} 304}
384 305
385void spu_yield(struct spu_context *ctx) 306void spu_yield(struct spu_context *ctx)
@@ -387,77 +308,60 @@ void spu_yield(struct spu_context *ctx)
387 struct spu *spu; 308 struct spu *spu;
388 int need_yield = 0; 309 int need_yield = 0;
389 310
390 down_write(&ctx->state_sema); 311 if (down_write_trylock(&ctx->state_sema)) {
391 spu = ctx->spu; 312 if ((spu = ctx->spu) != NULL) {
392 if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) { 313 int best = sched_find_first_bit(spu_prio->bitmap);
393 pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number); 314 if (best < MAX_PRIO) {
394 spu_deactivate(ctx); 315 pr_debug("%s: yielding SPU %d NODE %d\n",
395 ctx->state = SPU_STATE_SAVED; 316 __FUNCTION__, spu->number, spu->node);
396 need_yield = 1; 317 spu_deactivate(ctx);
397 } else if (spu) { 318 ctx->state = SPU_STATE_SAVED;
398 spu->prio = MAX_PRIO; 319 need_yield = 1;
320 } else {
321 spu->prio = MAX_PRIO;
322 }
323 }
324 up_write(&ctx->state_sema);
399 } 325 }
400 up_write(&ctx->state_sema);
401 if (unlikely(need_yield)) 326 if (unlikely(need_yield))
402 yield(); 327 yield();
403} 328}
404 329
405int __init spu_sched_init(void) 330int __init spu_sched_init(void)
406{ 331{
407 struct spu_runqueue *rq;
408 struct spu *spu;
409 int i; 332 int i;
410 333
411 rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL); 334 spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
412 if (!rq) { 335 if (!spu_prio) {
413 printk(KERN_WARNING "%s: Unable to allocate runqueues.\n", 336 printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
414 __FUNCTION__); 337 __FUNCTION__);
415 return 1; 338 return 1;
416 } 339 }
417 memset(rq, 0, sizeof(struct spu_runqueue));
418 init_MUTEX(&rq->sem);
419 INIT_LIST_HEAD(&rq->active_list);
420 INIT_LIST_HEAD(&rq->idle_list);
421 rq->nr_active = 0;
422 rq->nr_idle = 0;
423 rq->nr_switches = 0;
424 atomic_set(&rq->prio.nr_blocked, 0);
425 for (i = 0; i < MAX_PRIO; i++) { 340 for (i = 0; i < MAX_PRIO; i++) {
426 init_waitqueue_head(&rq->prio.waitq[i]); 341 init_waitqueue_head(&spu_prio->waitq[i]);
427 __clear_bit(i, rq->prio.bitmap); 342 __clear_bit(i, spu_prio->bitmap);
428 } 343 }
429 __set_bit(MAX_PRIO, rq->prio.bitmap); 344 __set_bit(MAX_PRIO, spu_prio->bitmap);
430 for (;;) { 345 for (i = 0; i < MAX_NUMNODES; i++) {
431 spu = spu_alloc(); 346 mutex_init(&spu_prio->active_mutex[i]);
432 if (!spu) 347 INIT_LIST_HEAD(&spu_prio->active_list[i]);
433 break;
434 pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number);
435 add_idle(rq, spu);
436 spu->rq = rq;
437 spu->timestamp = jiffies;
438 }
439 if (!rq->nr_idle) {
440 printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__);
441 kfree(rq);
442 return 1;
443 } 348 }
444 return 0; 349 return 0;
445} 350}
446 351
447void __exit spu_sched_exit(void) 352void __exit spu_sched_exit(void)
448{ 353{
449 struct spu_runqueue *rq = spu_rq(); 354 struct spu *spu, *tmp;
450 struct spu *spu; 355 int node;
451 356
452 if (!rq) { 357 for (node = 0; node < MAX_NUMNODES; node++) {
453 printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__); 358 mutex_lock(&spu_prio->active_mutex[node]);
454 return; 359 list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
455 } 360 list) {
456 while (rq->nr_idle > 0) { 361 list_del_init(&spu->list);
457 spu = del_idle(rq); 362 spu_free(spu);
458 if (!spu) 363 }
459 break; 364 mutex_unlock(&spu_prio->active_mutex[node]);
460 spu_free(spu);
461 } 365 }
462 kfree(rq); 366 kfree(spu_prio);
463} 367}
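
The switch notifier is the scheduler's export to the outside world: bind_context() fires the chain with the context's object_id and unbind_context() fires it with 0, so a profiler can tell which binary owns an SPU at any moment. A hedged sketch of a consumer module, along the lines of what oprofile would do; all names here are illustrative:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/spu.h>

static int my_spu_switch(struct notifier_block *n,
			 unsigned long object_id, void *data)
{
	struct spu *spu = data;

	/* object_id == 0 means a context save (the kernel runs next);
	   nonzero is what user space wrote to the object-id file */
	pr_debug("SPU %d switched, object-id 0x%lx\n",
		 spu->number, object_id);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_spu_switch,
};

static int __init my_init(void)
{
	return spu_switch_event_register(&my_nb);
}

static void __exit my_exit(void)
{
	spu_switch_event_unregister(&my_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
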
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 4485738e2102..a0f55ca2d488 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -39,6 +39,8 @@ struct spu_context_ops;
39 39
40#define SPU_CONTEXT_PREEMPT 0UL 40#define SPU_CONTEXT_PREEMPT 0UL
41 41
42struct spu_gang;
43
42struct spu_context { 44struct spu_context {
43 struct spu *spu; /* pointer to a physical SPU */ 45 struct spu *spu; /* pointer to a physical SPU */
44 struct spu_state csa; /* SPU context save area. */ 46 struct spu_state csa; /* SPU context save area. */
@@ -48,6 +50,7 @@ struct spu_context {
48 struct address_space *cntl; /* 'control' area mappings. */ 50 struct address_space *cntl; /* 'control' area mappings. */
49 struct address_space *signal1; /* 'signal1' area mappings. */ 51 struct address_space *signal1; /* 'signal1' area mappings. */
50 struct address_space *signal2; /* 'signal2' area mappings. */ 52 struct address_space *signal2; /* 'signal2' area mappings. */
53 u64 object_id; /* user space pointer for oprofile */
51 54
52 enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; 55 enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
53 struct rw_semaphore state_sema; 56 struct rw_semaphore state_sema;
@@ -66,7 +69,18 @@ struct spu_context {
66 u32 tagwait; 69 u32 tagwait;
67 struct spu_context_ops *ops; 70 struct spu_context_ops *ops;
68 struct work_struct reap_work; 71 struct work_struct reap_work;
69 u64 flags; 72 unsigned long flags;
73 unsigned long event_return;
74
75 struct list_head gang_list;
76 struct spu_gang *gang;
77};
78
79struct spu_gang {
80 struct list_head list;
81 struct mutex mutex;
82 struct kref kref;
83 int contexts;
70}; 84};
71 85
72struct mfc_dma_command { 86struct mfc_dma_command {
@@ -114,6 +128,7 @@ extern struct spu_context_ops spu_backing_ops;
114 128
115struct spufs_inode_info { 129struct spufs_inode_info {
116 struct spu_context *i_ctx; 130 struct spu_context *i_ctx;
131 struct spu_gang *i_gang;
117 struct inode vfs_inode; 132 struct inode vfs_inode;
118}; 133};
119#define SPUFS_I(inode) \ 134#define SPUFS_I(inode) \
@@ -124,12 +139,19 @@ extern struct tree_descr spufs_dir_contents[];
124/* system call implementation */ 139/* system call implementation */
125long spufs_run_spu(struct file *file, 140long spufs_run_spu(struct file *file,
126 struct spu_context *ctx, u32 *npc, u32 *status); 141 struct spu_context *ctx, u32 *npc, u32 *status);
127long spufs_create_thread(struct nameidata *nd, 142long spufs_create(struct nameidata *nd,
128 unsigned int flags, mode_t mode); 143 unsigned int flags, mode_t mode);
129extern struct file_operations spufs_context_fops; 144extern struct file_operations spufs_context_fops;
130 145
146/* gang management */
147struct spu_gang *alloc_spu_gang(void);
148struct spu_gang *get_spu_gang(struct spu_gang *gang);
149int put_spu_gang(struct spu_gang *gang);
150void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
151void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
152
131/* context management */ 153/* context management */
132struct spu_context * alloc_spu_context(void); 154struct spu_context * alloc_spu_context(struct spu_gang *gang);
133void destroy_spu_context(struct kref *kref); 155void destroy_spu_context(struct kref *kref);
134struct spu_context * get_spu_context(struct spu_context *ctx); 156struct spu_context * get_spu_context(struct spu_context *ctx);
135int put_spu_context(struct spu_context *ctx); 157int put_spu_context(struct spu_context *ctx);
@@ -183,5 +205,6 @@ void spufs_ibox_callback(struct spu *spu);
183void spufs_wbox_callback(struct spu *spu); 205void spufs_wbox_callback(struct spu *spu);
184void spufs_stop_callback(struct spu *spu); 206void spufs_stop_callback(struct spu *spu);
185void spufs_mfc_callback(struct spu *spu); 207void spufs_mfc_callback(struct spu *spu);
208void spufs_dma_callback(struct spu *spu, int type);
186 209
187#endif 210#endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 9d9d82dd32ba..0f782ca662ba 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1779,6 +1779,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
1779 */ 1779 */
1780 out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); 1780 out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
1781 eieio(); 1781 eieio();
1782 /*
1783 * FIXME: this is to restart a DMA that we were processing
1784 * before the save. better remember the fault information
1785 * in the csa instead.
1786 */
1787 if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
1788 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
1789 eieio();
1790 }
1782} 1791}
1783 1792
1784static inline void enable_user_access(struct spu_state *csa, struct spu *spu) 1793static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index e6565a949ddc..a6d1ae4dc2a3 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -38,7 +38,7 @@ static long do_spu_run(struct file *filp,
38 u32 npc, status; 38 u32 npc, status;
39 39
40 ret = -EFAULT; 40 ret = -EFAULT;
41 if (get_user(npc, unpc) || get_user(status, ustatus)) 41 if (get_user(npc, unpc))
42 goto out; 42 goto out;
43 43
44 /* check if this file was created by spu_create */ 44 /* check if this file was created by spu_create */
@@ -49,7 +49,10 @@ static long do_spu_run(struct file *filp,
49 i = SPUFS_I(filp->f_dentry->d_inode); 49 i = SPUFS_I(filp->f_dentry->d_inode);
50 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status); 50 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
51 51
52 if (put_user(npc, unpc) || put_user(status, ustatus)) 52 if (put_user(npc, unpc))
53 ret = -EFAULT;
54
55 if (ustatus && put_user(status, ustatus))
53 ret = -EFAULT; 56 ret = -EFAULT;
54out: 57out:
55 return ret; 58 return ret;
@@ -87,7 +90,7 @@ asmlinkage long sys_spu_create(const char __user *pathname,
87 ret = path_lookup(tmp, LOOKUP_PARENT| 90 ret = path_lookup(tmp, LOOKUP_PARENT|
88 LOOKUP_OPEN|LOOKUP_CREATE, &nd); 91 LOOKUP_OPEN|LOOKUP_CREATE, &nd);
89 if (!ret) { 92 if (!ret) {
90 ret = spufs_create_thread(&nd, flags, mode); 93 ret = spufs_create(&nd, flags, mode);
91 path_release(&nd); 94 path_release(&nd);
92 } 95 }
93 putname(tmp); 96 putname(tmp);
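
A side effect of splitting the two put_user() calls: the status/event pointer of sys_spu_run is now optional. npc must still be readable, but a caller that does not care about the event mask may pass NULL, which the old up-front get_user(status, ustatus) would have rejected with -EFAULT. A minimal sketch:

#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* run an SPU context, ignoring the event mask entirely */
static long spu_run_simple(int ctx_fd, uint32_t *npc)
{
	/* a NULL third argument is now accepted; the SPU status
	   word comes back as the syscall return value */
	return syscall(__NR_spu_run, ctx_fd, npc, NULL);
}
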
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index b42b53c40f5d..e73ea00efd8b 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -138,6 +138,7 @@ struct spu {
138 void (* ibox_callback)(struct spu *spu); 138 void (* ibox_callback)(struct spu *spu);
139 void (* stop_callback)(struct spu *spu); 139 void (* stop_callback)(struct spu *spu);
140 void (* mfc_callback)(struct spu *spu); 140 void (* mfc_callback)(struct spu *spu);
141 void (* dma_callback)(struct spu *spu, int type);
141 142
142 char irq_c0[8]; 143 char irq_c0[8];
143 char irq_c1[8]; 144 char irq_c1[8];
@@ -147,6 +148,7 @@ struct spu {
147}; 148};
148 149
149struct spu *spu_alloc(void); 150struct spu *spu_alloc(void);
151struct spu *spu_alloc_node(int node);
150void spu_free(struct spu *spu); 152void spu_free(struct spu *spu);
151int spu_irq_class_0_bottom(struct spu *spu); 153int spu_irq_class_0_bottom(struct spu *spu);
152int spu_irq_class_1_bottom(struct spu *spu); 154int spu_irq_class_1_bottom(struct spu *spu);
@@ -168,6 +170,22 @@ extern struct spufs_calls {
168 struct module *owner; 170 struct module *owner;
169} spufs_calls; 171} spufs_calls;
170 172
173/* return status from spu_run, same as in libspe */
 174#define SPE_EVENT_DMA_ALIGNMENT 0x0008 /* A DMA alignment error */
 175#define SPE_EVENT_SPE_ERROR 0x0010 /* An illegal instruction error */
 176#define SPE_EVENT_SPE_DATA_SEGMENT 0x0020 /* A DMA segmentation error */
 177#define SPE_EVENT_SPE_DATA_STORAGE 0x0040 /* A DMA storage error */
178#define SPE_EVENT_INVALID_DMA 0x0800 /* Invalid MFC DMA */
179
180/*
181 * Flags for sys_spu_create.
182 */
183#define SPU_CREATE_EVENTS_ENABLED 0x0001
184#define SPU_CREATE_GANG 0x0002
185
186#define SPU_CREATE_FLAG_ALL 0x0003 /* mask of all valid flags */
187
188
171#ifdef CONFIG_SPU_FS_MODULE 189#ifdef CONFIG_SPU_FS_MODULE
172int register_spu_syscalls(struct spufs_calls *calls); 190int register_spu_syscalls(struct spufs_calls *calls);
173void unregister_spu_syscalls(struct spufs_calls *calls); 191void unregister_spu_syscalls(struct spufs_calls *calls);
@@ -183,6 +201,24 @@ static inline void unregister_spu_syscalls(struct spufs_calls *calls)
183 201
184 202
185/* 203/*
204 * Notifier blocks:
205 *
206 * oprofile can get notified when a context switch is performed
 207 * on an SPE. The notifier function that gets called is passed
208 * a pointer to the SPU structure as well as the object-id that
209 * identifies the binary running on that SPU now.
210 *
211 * For a context save, the object-id that is passed is zero,
212 * identifying that the kernel will run from that moment on.
213 *
214 * For a context restore, the object-id is the value written
 215 * to the object-id spufs file from user space, and the notifier
216 * function can assume that spu->ctx is valid.
217 */
218int spu_switch_event_register(struct notifier_block * n);
219int spu_switch_event_unregister(struct notifier_block * n);
220
221/*
 186 * This defines the Local Store, Problem Area and Privilege Area of an SPU. 222 * This defines the Local Store, Problem Area and Privilege Area of an SPU.
187 */ 223 */
188 224
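
The object-id file closes the loop described above: user space (typically the libspe loader) writes the address of the SPU ELF image into object-id, and the same value comes back out of the switch notifier so samples can be attributed to the right binary. A hedged user-space sketch; the path layout and the text encoding accepted by the simple-attribute write are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* advertise the SPU ELF image address to profilers */
static void publish_object_id(const char *ctx_dir, const void *spu_elf)
{
	char path[256], buf[32];
	int fd, len;

	snprintf(path, sizeof(path), "%s/object-id", ctx_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return;
	len = snprintf(buf, sizeof(buf), "0x%lx", (unsigned long)spu_elf);
	write(fd, buf, len);
	close(fd);
}
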