author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-07 11:54:55 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-07 11:54:55 -0400
commit		143a275984b37058d2d3ab1ec0e5be9026fda24d (patch)
tree		f2a27a47dd7de855f8b5a0c3ef96809fc6738eca /arch
parent		b74d0deb968e1f85942f17080eace015ce3c332c (diff)
parent		f40e524eaec9697d1515564fd5b961d839d2dc4f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Fix building of COFF zImages
  [POWERPC] spufs: Fix error handling in spufs_fill_dir()
  [POWERPC] Add table of contents to booting-without-of.txt
  [POWERPC] spufs: Don't yield nosched context
  [POWERPC] Fix typo in booting-without-of-txt section numbering
  [POWERPC] scc_sio: Fix link failure
  [POWERPC] cbe_cpufreq: Limit frequency via cpufreq notifier chain
  [POWERPC] Fix pci_setup_phb_io_dynamic for pci_iomap
  [POWERPC] spufs scheduler: Fix wakeup races
  [POWERPC] spufs: Synchronize pte invalidation vs ps close
  [POWERPC] spufs: Free mm if spufs_fill_dir() failed
  [POWERPC] spufs: Fix gang destroy leaks
  [POWERPC] spufs: Hook up spufs_release_mem
  [POWERPC] spufs: Refuse to load the module when not running on cell
  [POWERPC] pasemi: Fix iommu + 64K PAGE_SIZE bug
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/boot/crt0.S                    |   1
-rw-r--r--	arch/powerpc/kernel/of_platform.c           |   2
-rw-r--r--	arch/powerpc/platforms/cell/cbe_cpufreq.c   |  33
-rw-r--r--	arch/powerpc/platforms/cell/spufs/context.c |   4
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c    |  57
-rw-r--r--	arch/powerpc/platforms/cell/spufs/inode.c   |  62
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c   | 161
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h   |   2
-rw-r--r--	arch/powerpc/platforms/celleb/Makefile      |   2
-rw-r--r--	arch/powerpc/platforms/pasemi/iommu.c       |   8
10 files changed, 167 insertions, 165 deletions
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 5a4215c4b01..f1c4dfc635b 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -13,6 +13,7 @@
 
 	.text
 	/* a procedure descriptor used when booting this as a COFF file */
+	.globl	_zimage_start_opd
 _zimage_start_opd:
 	.long	_zimage_start, 0, 0, 0
 
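
The one-line fix above makes _zimage_start_opd visible outside crt0.o; without the .globl the symbol presumably could not be resolved when building the COFF form of the zImage (the "Fix building of COFF zImages" shortlog entry). For readers unfamiliar with the descriptor the comment mentions, here is a C view of the four .long words, purely illustrative (the struct and field names are not from the tree):

/* A COFF-style loader enters through a procedure descriptor rather
 * than jumping at code directly; the four words mirror the .long
 * directive above. */
struct opd {
	unsigned long entry;	/* _zimage_start */
	unsigned long toc;	/* left as 0 here */
	unsigned long env;	/* left as 0 here */
	unsigned long pad;	/* 0 */
};
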
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index d501c23e515..d454f61c9c7 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -433,7 +433,7 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
 	 * Note also that we don't do ISA, this will also be fixed with a
 	 * more massive rework.
 	 */
-	pci_setup_phb_io(phb, 0);
+	pci_setup_phb_io(phb, pci_io_base == 0);
 
 	/* Init pci_dn data structures */
 	pci_devs_phb_init_dynamic(phb);
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index f9ac3fe3be9..ac445998d83 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -67,6 +67,7 @@ static u64 MIC_Slow_Next_Timer_table[] = {
 	0x00003FC000000000ull,
 };
 
+static unsigned int pmi_frequency_limit = 0;
 /*
  * hardware specific functions
  */
@@ -164,7 +165,6 @@ static int set_pmode(int cpu, unsigned int slow_mode) {
 
 static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
 {
-	struct cpufreq_policy policy;
 	u8 cpu;
 	u8 cbe_pmode_new;
 
@@ -173,15 +173,27 @@ static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
 	cpu = cbe_node_to_cpu(pmi_msg.data1);
 	cbe_pmode_new = pmi_msg.data2;
 
-	cpufreq_get_policy(&policy, cpu);
+	pmi_frequency_limit = cbe_freqs[cbe_pmode_new].frequency;
 
-	policy.max = min(policy.max, cbe_freqs[cbe_pmode_new].frequency);
-	policy.min = min(policy.min, policy.max);
+	pr_debug("cbe_handle_pmi: max freq=%d\n", pmi_frequency_limit);
+}
+
+static int pmi_notifier(struct notifier_block *nb,
+			unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
 
-	pr_debug("cbe_handle_pmi: new policy.min=%d policy.max=%d\n", policy.min, policy.max);
-	cpufreq_set_policy(&policy);
+	if (event != CPUFREQ_INCOMPATIBLE)
+		return 0;
+
+	cpufreq_verify_within_limits(policy, 0, pmi_frequency_limit);
+	return 0;
 }
 
+static struct notifier_block pmi_notifier_block = {
+	.notifier_call = pmi_notifier,
+};
+
 static struct pmi_handler cbe_pmi_handler = {
 	.type			= PMI_TYPE_FREQ_CHANGE,
 	.handle_pmi_message	= cbe_cpufreq_handle_pmi,
@@ -238,12 +250,21 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
 
+	if (pmi_dev) {
+		/* frequency might get limited later, initialize limit with max_freq */
+		pmi_frequency_limit = max_freq;
+		cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+	}
+
 	/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
 	return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
 }
 
 static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
+	if (pmi_dev)
+		cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+
 	cpufreq_frequency_table_put_attr(policy->cpu);
 	return 0;
 }
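
The cbe_cpufreq change is worth a note: the old code rewrote the whole policy from inside the PMI message handler, while the new code merely records the firmware-imposed limit and lets the cpufreq core apply it whenever a policy is re-evaluated. A minimal sketch of that pattern against the 2.6.22-era cpufreq API, with made-up names outside the cpufreq calls themselves:

#include <linux/cpufreq.h>
#include <linux/notifier.h>

static unsigned int platform_khz_limit;	/* updated by platform events */

/* Called for every policy change; on the CPUFREQ_INCOMPATIBLE pass a
 * notifier may tighten the policy's min/max bounds. */
static int limit_notifier(struct notifier_block *nb,
			  unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_INCOMPATIBLE)
		return 0;

	cpufreq_verify_within_limits(policy, 0, platform_khz_limit);
	return 0;
}

static struct notifier_block limit_nb = {
	.notifier_call = limit_notifier,
};

/* Register/unregister from the driver's init/exit paths, exactly as
 * cbe_cpufreq_cpu_init()/_exit() do above:
 *	cpufreq_register_notifier(&limit_nb, CPUFREQ_POLICY_NOTIFIER);
 *	cpufreq_unregister_notifier(&limit_nb, CPUFREQ_POLICY_NOTIFIER);
 */
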
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 8654749e317..7c51cb54bca 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -39,7 +39,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	if (spu_init_csa(&ctx->csa))
 		goto out_free;
 	spin_lock_init(&ctx->mmio_lock);
-	spin_lock_init(&ctx->mapping_lock);
+	mutex_init(&ctx->mapping_lock);
 	kref_init(&ctx->kref);
 	mutex_init(&ctx->state_mutex);
 	mutex_init(&ctx->run_mutex);
@@ -103,6 +103,7 @@ void spu_forget(struct spu_context *ctx)
 
 void spu_unmap_mappings(struct spu_context *ctx)
 {
+	mutex_lock(&ctx->mapping_lock);
 	if (ctx->local_store)
 		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
 	if (ctx->mfc)
@@ -117,6 +118,7 @@ void spu_unmap_mappings(struct spu_context *ctx)
 		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
 	if (ctx->psmap)
 		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
+	mutex_unlock(&ctx->mapping_lock);
 }
 
 /**
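
This is the context.c half of "Synchronize pte invalidation vs ps close": spu_unmap_mappings() now holds mapping_lock across the whole invalidation pass, so a release racing with it cannot clear one of the address_space pointers between the NULL check and the unmap_mapping_range() call. The lock also changes from a spinlock to a mutex (see the spufs.h hunk below), presumably because the section it now covers may sleep. The shape of the fix, condensed into one sketch:

/* Check-then-use of a mapping pointer is only safe if the release
 * path clears that pointer under the same lock; names follow the
 * spufs code above. */
static void sketch_unmap(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->psmap)		/* cleared under mapping_lock in release */
		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
	mutex_unlock(&ctx->mapping_lock);
}
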
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 45614c73c78..b1e7e2f8a2e 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -45,11 +45,11 @@ spufs_mem_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->local_store = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -59,10 +59,10 @@ spufs_mem_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->local_store = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -217,6 +217,7 @@ unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
 
 static const struct file_operations spufs_mem_fops = {
 	.open			= spufs_mem_open,
+	.release		= spufs_mem_release,
 	.read			= spufs_mem_read,
 	.write			= spufs_mem_write,
 	.llseek			= generic_file_llseek,
@@ -309,11 +310,11 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->cntl = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return simple_attr_open(inode, file, spufs_cntl_get,
 				spufs_cntl_set, "0x%08lx");
 }
@@ -326,10 +327,10 @@ spufs_cntl_release(struct inode *inode, struct file *file)
 
 	simple_attr_close(inode, file);
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->cntl = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -812,11 +813,11 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->signal1 = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -826,10 +827,10 @@ spufs_signal1_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->signal1 = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -936,11 +937,11 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->signal2 = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -950,10 +951,10 @@ spufs_signal2_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->signal2 = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1154,10 +1155,10 @@ static int spufs_mss_open(struct inode *inode, struct file *file)
 
 	file->private_data = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!i->i_openers++)
 		ctx->mss = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1167,10 +1168,10 @@ spufs_mss_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->mss = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1211,11 +1212,11 @@ static int spufs_psmap_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = i->i_ctx;
 	if (!i->i_openers++)
 		ctx->psmap = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1225,10 +1226,10 @@ spufs_psmap_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->psmap = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1281,11 +1282,11 @@ static int spufs_mfc_open(struct inode *inode, struct file *file)
 	if (atomic_read(&inode->i_count) != 1)
 		return -EBUSY;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->mfc = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1295,10 +1296,10 @@ spufs_mfc_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->mfc = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
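
Two distinct fixes share the file.c diff. Every open/release pair switches from spin_lock to mutex_lock on mapping_lock, matching the type change in spufs.h. Separately, spufs_mem_fops gains a .release entry: spufs_mem_release() already existed but was never wired up, so i_openers was incremented on open and never decremented, and ctx->local_store could never be reset (the "Hook up spufs_release_mem" shortlog entry). The invariant all of these handlers maintain, condensed:

/* First opener publishes the mapping, last closer retracts it, both
 * under ctx->mapping_lock so spu_unmap_mappings() never sees a stale
 * pointer. Condensed from the spufs_mem_* handlers above. */
static int sketch_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int sketch_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
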
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 7150730e2ff..9807206e021 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -177,7 +177,7 @@ static int spufs_rmdir(struct inode *parent, struct dentry *dir)
 static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
 			  int mode, struct spu_context *ctx)
 {
-	struct dentry *dentry;
+	struct dentry *dentry, *tmp;
 	int ret;
 
 	while (files->name && files->name[0]) {
@@ -193,7 +193,20 @@ static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
 	}
 	return 0;
 out:
-	spufs_prune_dir(dir);
+	/*
+	 * remove all children from dir. dir->inode is not set so don't
+	 * just simply use spufs_prune_dir() and panic afterwards :)
+	 * dput() looks like it will do the right thing:
+	 * - dec parent's ref counter
+	 * - remove child from parent's child list
+	 * - free child's inode if possible
+	 * - free child
+	 */
+	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
+		dput(dentry);
+	}
+
+	shrink_dcache_parent(dir);
 	return ret;
 }
 
@@ -274,6 +287,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
 		goto out;
 
 out_free_ctx:
+	spu_forget(ctx);
 	put_spu_context(ctx);
 out_iput:
 	iput(inode);
@@ -349,37 +363,6 @@ out:
 	return ret;
 }
 
-static int spufs_rmgang(struct inode *root, struct dentry *dir)
-{
-	/* FIXME: this fails if the dir is not empty,
-	   which causes a leak of gangs. */
-	return simple_rmdir(root, dir);
-}
-
-static int spufs_gang_close(struct inode *inode, struct file *file)
-{
-	struct inode *parent;
-	struct dentry *dir;
-	int ret;
-
-	dir = file->f_path.dentry;
-	parent = dir->d_parent->d_inode;
-
-	ret = spufs_rmgang(parent, dir);
-	WARN_ON(ret);
-
-	return dcache_dir_close(inode, file);
-}
-
-const struct file_operations spufs_gang_fops = {
-	.open		= dcache_dir_open,
-	.release	= spufs_gang_close,
-	.llseek		= dcache_dir_lseek,
-	.read		= generic_read_dir,
-	.readdir	= dcache_readdir,
-	.fsync		= simple_sync_file,
-};
-
 static int
 spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
 {
@@ -407,7 +390,6 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
 	inode->i_fop = &simple_dir_operations;
 
 	d_instantiate(dentry, inode);
-	dget(dentry);
 	dir->i_nlink++;
 	dentry->d_inode->i_nlink++;
 	return ret;
@@ -437,7 +419,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
 		goto out;
 	}
 
-	filp->f_op = &spufs_gang_fops;
+	filp->f_op = &simple_dir_operations;
 	fd_install(ret, filp);
 out:
 	return ret;
@@ -458,8 +440,10 @@ static int spufs_create_gang(struct inode *inode,
 	 * in error path of *_open().
 	 */
 	ret = spufs_gang_open(dget(dentry), mntget(mnt));
-	if (ret < 0)
-		WARN_ON(spufs_rmgang(inode, dentry));
+	if (ret < 0) {
+		int err = simple_rmdir(inode, dentry);
+		WARN_ON(err);
+	}
 
 out:
 	mutex_unlock(&inode->i_mutex);
@@ -600,6 +584,10 @@ spufs_create_root(struct super_block *sb, void *data)
 	struct inode *inode;
 	int ret;
 
+	ret = -ENODEV;
+	if (!spu_management_ops)
+		goto out;
+
 	ret = -ENOMEM;
 	inode = spufs_new_inode(sb, S_IFDIR | 0775);
 	if (!inode)
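
The inode.c diff bundles several shortlog items: the manual child cleanup in spufs_fill_dir() ("Fix error handling in spufs_fill_dir()"), spu_forget() in the spufs_mkdir() error path ("Free mm if spufs_fill_dir() failed"), the gang directory no longer trying to remove itself from a close hook (the deleted FIXME noted that rmdir fails on a non-empty directory, leaking gangs) with cleanup now done only in the create error path, and the -ENODEV guard in spufs_create_root() ("Refuse to load the module when not running on cell"). That last one is the simplest pattern, condensed here:

/* Refuse to initialize on the wrong platform: if platform setup never
 * registered the SPU management ops, fail with -ENODEV before touching
 * any SPU state. Condensed from spufs_create_root() above. */
static int sketch_create_root(struct super_block *sb, void *data)
{
	if (!spu_management_ops)
		return -ENODEV;
	/* ... proceed with inode allocation as in the real code ... */
	return 0;
}
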
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index b6ecb30e7d5..3b831e07f1e 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -93,43 +93,6 @@ void spu_stop_tick(struct spu_context *ctx)
 	}
 }
 
-void spu_sched_tick(struct work_struct *work)
-{
-	struct spu_context *ctx =
-		container_of(work, struct spu_context, sched_work.work);
-	struct spu *spu;
-	int preempted = 0;
-
-	/*
-	 * If this context is being stopped avoid rescheduling from the
-	 * scheduler tick because we would block on the state_mutex.
-	 * The caller will yield the spu later on anyway.
-	 */
-	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-		return;
-
-	mutex_lock(&ctx->state_mutex);
-	spu = ctx->spu;
-	if (spu) {
-		int best = sched_find_first_bit(spu_prio->bitmap);
-		if (best <= ctx->prio) {
-			spu_deactivate(ctx);
-			preempted = 1;
-		}
-	}
-	mutex_unlock(&ctx->state_mutex);
-
-	if (preempted) {
-		/*
-		 * We need to break out of the wait loop in spu_run manually
-		 * to ensure this context gets put on the runqueue again
-		 * ASAP.
-		 */
-		wake_up(&ctx->stop_wq);
-	} else
-		spu_start_tick(ctx);
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu: spu to add to the active list
@@ -273,34 +236,6 @@ static void spu_prio_wait(struct spu_context *ctx)
 	remove_wait_queue(&ctx->stop_wq, &wait);
 }
 
-/**
- * spu_reschedule - try to find a runnable context for a spu
- * @spu: spu available
- *
- * This function is called whenever a spu becomes idle. It looks for the
- * most suitable runnable spu context and schedules it for execution.
- */
-static void spu_reschedule(struct spu *spu)
-{
-	int best;
-
-	spu_free(spu);
-
-	spin_lock(&spu_prio->runq_lock);
-	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < MAX_PRIO) {
-		struct list_head *rq = &spu_prio->runq[best];
-		struct spu_context *ctx;
-
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
-		wake_up(&ctx->stop_wq);
-	}
-	spin_unlock(&spu_prio->runq_lock);
-}
-
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
 	struct spu *spu = NULL;
@@ -429,6 +364,51 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 }
 
 /**
+ * grab_runnable_context - try to find a runnable context
+ *
+ * Remove the highest priority context on the runqueue and return it
+ * to the caller. Returns %NULL if no runnable context was found.
+ */
+static struct spu_context *grab_runnable_context(int prio)
+{
+	struct spu_context *ctx = NULL;
+	int best;
+
+	spin_lock(&spu_prio->runq_lock);
+	best = sched_find_first_bit(spu_prio->bitmap);
+	if (best < prio) {
+		struct list_head *rq = &spu_prio->runq[best];
+
+		BUG_ON(list_empty(rq));
+
+		ctx = list_entry(rq->next, struct spu_context, rq);
+		__spu_del_from_rq(ctx);
+	}
+	spin_unlock(&spu_prio->runq_lock);
+
+	return ctx;
+}
+
+static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
+{
+	struct spu *spu = ctx->spu;
+	struct spu_context *new = NULL;
+
+	if (spu) {
+		new = grab_runnable_context(max_prio);
+		if (new || force) {
+			spu_unbind_context(spu, ctx);
+			spu_free(spu);
+			if (new)
+				wake_up(&new->stop_wq);
+		}
+
+	}
+
+	return new != NULL;
+}
+
+/**
  * spu_deactivate - unbind a context from it's physical spu
  * @ctx: spu context to unbind
  *
@@ -437,12 +417,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  */
 void spu_deactivate(struct spu_context *ctx)
 {
-	struct spu *spu = ctx->spu;
-
-	if (spu) {
-		spu_unbind_context(spu, ctx);
-		spu_reschedule(spu);
-	}
+	__spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
 /**
@@ -455,21 +430,43 @@ void spu_deactivate(struct spu_context *ctx)
  */
 void spu_yield(struct spu_context *ctx)
 {
-	struct spu *spu;
-
-	if (mutex_trylock(&ctx->state_mutex)) {
-		if ((spu = ctx->spu) != NULL) {
-			int best = sched_find_first_bit(spu_prio->bitmap);
-			if (best < MAX_PRIO) {
-				pr_debug("%s: yielding SPU %d NODE %d\n",
-					__FUNCTION__, spu->number, spu->node);
-				spu_deactivate(ctx);
-			}
-		}
+	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
+		mutex_lock(&ctx->state_mutex);
+		__spu_deactivate(ctx, 0, MAX_PRIO);
 		mutex_unlock(&ctx->state_mutex);
 	}
 }
 
+void spu_sched_tick(struct work_struct *work)
+{
+	struct spu_context *ctx =
+		container_of(work, struct spu_context, sched_work.work);
+	int preempted;
+
+	/*
+	 * If this context is being stopped avoid rescheduling from the
+	 * scheduler tick because we would block on the state_mutex.
+	 * The caller will yield the spu later on anyway.
+	 */
+	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+		return;
+
+	mutex_lock(&ctx->state_mutex);
+	preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
+	mutex_unlock(&ctx->state_mutex);
+
+	if (preempted) {
+		/*
+		 * We need to break out of the wait loop in spu_run manually
+		 * to ensure this context gets put on the runqueue again
+		 * ASAP.
+		 */
+		wake_up(&ctx->stop_wq);
+	} else {
+		spu_start_tick(ctx);
+	}
+}
+
 int __init spu_sched_init(void)
 {
 	int i;
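
The sched.c rework deduplicates two "scan the runqueue and maybe unbind" paths (spu_reschedule() and the open-coded logic in spu_yield() and spu_sched_tick()) into grab_runnable_context() plus __spu_deactivate(ctx, force, max_prio). Note the ordering change: a runnable context is now claimed (removed from the runqueue under runq_lock) before the spu is unbound, freed, and the wakeup issued, where the old spu_reschedule() freed the spu first; that reordering appears to be what the "Fix wakeup races" shortlog entry refers to. How the three callers parameterize the helper, taken directly from the new code:

/* spu_deactivate():	__spu_deactivate(ctx, 1, MAX_PRIO);
 *	always unbind; hand the spu to any runnable context.
 * spu_yield():		__spu_deactivate(ctx, 0, MAX_PRIO);
 *	unbind only if some runnable context exists, and skip
 *	SPU_CREATE_NOSCHED contexts entirely ("Don't yield nosched
 *	context" in the shortlog).
 * spu_sched_tick():	__spu_deactivate(ctx, 0, ctx->prio + 1);
 *	preempt only for a context of equal or better priority (a
 *	lower prio value means more important, as with CPU tasks).
 */
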
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0a947fd7de5..47617e8014a 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -55,7 +55,7 @@ struct spu_context {
 	struct address_space *signal2; /* 'signal2' area mappings. */
 	struct address_space *mss; /* 'mss' area mappings. */
 	struct address_space *psmap; /* 'psmap' area mappings. */
-	spinlock_t mapping_lock;
+	struct mutex mapping_lock;
 	u64 object_id; /* user space pointer for oprofile */
 
 	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
diff --git a/arch/powerpc/platforms/celleb/Makefile b/arch/powerpc/platforms/celleb/Makefile
index f4f82520dc4..5240046d867 100644
--- a/arch/powerpc/platforms/celleb/Makefile
+++ b/arch/powerpc/platforms/celleb/Makefile
@@ -4,5 +4,5 @@ obj-y += interrupt.o iommu.o setup.o \
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PPC_UDBG_BEAT)	+= udbg_beat.o
-obj-$(CONFIG_HAS_TXX9_SERIAL)	+= scc_sio.o
+obj-$(CONFIG_SERIAL_TXX9)	+= scc_sio.o
 obj-$(CONFIG_SPU_BASE)		+= spu_priv1.o
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 95fa6a7d15e..f33b21b9f5d 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -31,8 +31,6 @@
 #define IOBMAP_PAGE_SIZE	(1 << IOBMAP_PAGE_SHIFT)
 #define IOBMAP_PAGE_MASK	(IOBMAP_PAGE_SIZE - 1)
 
-#define IOBMAP_PAGE_FACTOR	(PAGE_SHIFT - IOBMAP_PAGE_SHIFT)
-
 #define IOB_BASE		0xe0000000
 #define IOB_SIZE		0x3000
 /* Configuration registers */
@@ -97,9 +95,6 @@ static void iobmap_build(struct iommu_table *tbl, long index,
 
 	bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
 
-	npages <<= IOBMAP_PAGE_FACTOR;
-	index <<= IOBMAP_PAGE_FACTOR;
-
 	ip = ((u32 *)tbl->it_base) + index;
 
 	while (npages--) {
@@ -125,9 +120,6 @@ static void iobmap_free(struct iommu_table *tbl, long index,
 
 	bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
 
-	npages <<= IOBMAP_PAGE_FACTOR;
-	index <<= IOBMAP_PAGE_FACTOR;
-
 	ip = ((u32 *)tbl->it_base) + index;
 
 	while (npages--) {
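
The pasemi fix simply deletes the page-size rescaling in both iobmap_build() and iobmap_free(). What is certain from the macros above is the arithmetic that made this a 64K-only problem; assuming the IOMMU's native page is the usual 4K (an IOBMAP_PAGE_SHIFT of 12, defined just above the first hunk), the removed shifts were:

/* IOBMAP_PAGE_FACTOR = PAGE_SHIFT - IOBMAP_PAGE_SHIFT
 *
 * 4K kernel pages:  factor = 12 - 12 = 0 -> npages <<= 0  (no-op)
 * 64K kernel pages: factor = 16 - 12 = 4 -> npages <<= 4  (x16)
 *
 * So with CONFIG_PPC_64K_PAGES the index and count handed to the
 * table were scaled by 16 while bus_addr above was still computed
 * with PAGE_SHIFT, and only those configurations broke.
 */
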