Diffstat (limited to 'arch/powerpc/platforms')
 arch/powerpc/platforms/cell/Kconfig                          |   13
 arch/powerpc/platforms/cell/Makefile                         |    8
 arch/powerpc/platforms/cell/interrupt.c                      |   42
 arch/powerpc/platforms/cell/interrupt.h                      |    1
 arch/powerpc/platforms/cell/iommu.c                          |  225
 arch/powerpc/platforms/cell/pervasive.c                      |  229
 arch/powerpc/platforms/cell/pervasive.h                      |   62
 arch/powerpc/platforms/cell/setup.c                          |   93
 arch/powerpc/platforms/cell/spu_base.c                       |  711
 arch/powerpc/platforms/cell/spu_priv1.c                      |  133
 arch/powerpc/platforms/cell/spu_syscalls.c                   |   88
 arch/powerpc/platforms/cell/spufs/Makefile                   |   54
 arch/powerpc/platforms/cell/spufs/backing_ops.c              |  308
 arch/powerpc/platforms/cell/spufs/context.c                  |  167
 arch/powerpc/platforms/cell/spufs/file.c                     |  794
 arch/powerpc/platforms/cell/spufs/hw_ops.c                   |  255
 arch/powerpc/platforms/cell/spufs/inode.c                    |  486
 arch/powerpc/platforms/cell/spufs/run.c                      |  131
 arch/powerpc/platforms/cell/spufs/sched.c                    |  461
 arch/powerpc/platforms/cell/spufs/spu_restore.c              |  336
 arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S         |  116
 arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped |  231
 arch/powerpc/platforms/cell/spufs/spu_save.c                 |  195
 arch/powerpc/platforms/cell/spufs/spu_save_crt0.S            |  102
 arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped    |  191
 arch/powerpc/platforms/cell/spufs/spu_utils.h                |  160
 arch/powerpc/platforms/cell/spufs/spufs.h                    |  163
 arch/powerpc/platforms/cell/spufs/switch.c                   | 2180
 arch/powerpc/platforms/cell/spufs/syscalls.c                 |  101
 arch/powerpc/platforms/chrp/setup.c                          |   16
 arch/powerpc/platforms/iseries/irq.c                         |  337
 arch/powerpc/platforms/iseries/irq.h                         |    1
 arch/powerpc/platforms/iseries/lpardata.c                    |    7
 arch/powerpc/platforms/iseries/setup.c                       |   10
 arch/powerpc/platforms/maple/setup.c                         |   24
 arch/powerpc/platforms/powermac/Makefile                     |    7
 arch/powerpc/platforms/powermac/bootx_init.c                 |  547
 arch/powerpc/platforms/powermac/cpufreq_64.c                 |  496
 arch/powerpc/platforms/powermac/feature.c                    |  367
 arch/powerpc/platforms/powermac/low_i2c.c                    | 1414
 arch/powerpc/platforms/powermac/nvram.c                      |   53
 arch/powerpc/platforms/powermac/pci.c                        |  299
 arch/powerpc/platforms/powermac/pfunc_base.c                 |  405
 arch/powerpc/platforms/powermac/pfunc_core.c                 |  989
 arch/powerpc/platforms/powermac/pic.c                        |  474
 arch/powerpc/platforms/powermac/pmac.h                       |    6
 arch/powerpc/platforms/powermac/setup.c                      |  107
 arch/powerpc/platforms/powermac/smp.c                        |  382
 arch/powerpc/platforms/powermac/time.c                       |   15
 arch/powerpc/platforms/powermac/udbg_adb.c                   |  221
 arch/powerpc/platforms/powermac/udbg_scc.c                   |  165
 arch/powerpc/platforms/pseries/Makefile                      |    2
 arch/powerpc/platforms/pseries/eeh.c                         |   26
 arch/powerpc/platforms/pseries/iommu.c                       |   12
 arch/powerpc/platforms/pseries/lpar.c                        |   76
 arch/powerpc/platforms/pseries/pci_dlpar.c                   |  174
 arch/powerpc/platforms/pseries/ras.c                         |   11
 arch/powerpc/platforms/pseries/ras.h                         |    9
 arch/powerpc/platforms/pseries/setup.c                       |   55
 arch/powerpc/platforms/pseries/xics.c                        |    2
 60 files changed, 13360 insertions(+), 1385 deletions(-)
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
new file mode 100644
index 00000000000..3157071e241
--- /dev/null
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -0,0 +1,13 @@
+menu "Cell Broadband Engine options"
+	depends on PPC_CELL
+
+config SPU_FS
+	tristate "SPU file system"
+	default m
+	depends on PPC_CELL
+	help
+	  The SPU file system is used to access Synergistic Processing
+	  Units on machines implementing the Broadband Processor
+	  Architecture.
+
+endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 55e094b96bc..16031b565be 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,2 +1,10 @@
 obj-y			+= interrupt.o iommu.o setup.o spider-pic.o
+obj-y			+= pervasive.o
+
 obj-$(CONFIG_SMP)	+= smp.o
+obj-$(CONFIG_SPU_FS)	+= spufs/ spu-base.o
+
+spu-base-y		+= spu_base.o spu_priv1.o
+
+builtin-spufs-$(CONFIG_SPU_FS)	+= spu_syscalls.o
+obj-y			+= $(builtin-spufs-m)
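The last two lines are a small Kbuild idiom: `builtin-spufs-$(CONFIG_SPU_FS)` expands to `builtin-spufs-m` only when spufs is configured as a module, so the syscall stubs in spu_syscalls.o are linked into the kernel image exactly in that case; a built-in spufs supplies the real syscall entry points itself. How the variable expands per config value (illustrative comments, not part of the Makefile):

# CONFIG_SPU_FS=m:  builtin-spufs-m = spu_syscalls.o
#                   obj-y += spu_syscalls.o   (stubs built in; module fills them)
# CONFIG_SPU_FS=y:  builtin-spufs-y = spu_syscalls.o, builtin-spufs-m is empty
#                   obj-y += (nothing)        (spufs itself provides the syscalls)
# CONFIG_SPU_FS=n:  both variables empty; nothing is added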
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 7fbe78a9327..63aa52acf44 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -23,6 +23,7 @@
 #include <linux/config.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
 
@@ -55,6 +56,7 @@ struct iic_regs {
 
 struct iic {
 	struct iic_regs __iomem *regs;
+	u8 target_id;
 };
 
 static DEFINE_PER_CPU(struct iic, iic);
@@ -172,12 +174,11 @@ int iic_get_irq(struct pt_regs *regs)
 	return irq;
 }
 
-static struct iic_regs __iomem *find_iic(int cpu)
+static int setup_iic(int cpu, struct iic *iic)
 {
 	struct device_node *np;
 	int nodeid = cpu / 2;
 	unsigned long regs;
-	struct iic_regs __iomem *iic_regs;
 
 	for (np = of_find_node_by_type(NULL, "cpu");
 	     np;
@@ -188,20 +189,23 @@ static struct iic_regs __iomem *find_iic(int cpu)
 
 	if (!np) {
 		printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
-		iic_regs = NULL;
-	} else {
-		regs = *(long *)get_property(np, "iic", NULL);
-
-		/* hack until we have decided on the devtree info */
-		regs += 0x400;
-		if (cpu & 1)
-			regs += 0x20;
-
-		printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
-		iic_regs = __ioremap(regs, sizeof(struct iic_regs),
-				     _PAGE_NO_CACHE);
+		iic->regs = NULL;
+		iic->target_id = 0xff;
+		return -ENODEV;
 	}
-	return iic_regs;
+
+	regs = *(long *)get_property(np, "iic", NULL);
+
+	/* hack until we have decided on the devtree info */
+	regs += 0x400;
+	if (cpu & 1)
+		regs += 0x20;
+
+	printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
+	iic->regs = __ioremap(regs, sizeof(struct iic_regs),
+			      _PAGE_NO_CACHE);
+	iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
@@ -227,6 +231,12 @@ void iic_cause_IPI(int cpu, int mesg)
 	out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4);
 }
 
+u8 iic_get_target_id(int cpu)
+{
+	return per_cpu(iic, cpu).target_id;
+}
+EXPORT_SYMBOL_GPL(iic_get_target_id);
+
 static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
 {
 	smp_message_recv(iic_irq_to_ipi(irq), regs);
@@ -276,7 +286,7 @@ void iic_init_IRQ(void)
 	irq_offset = 0;
 	for_each_cpu(cpu) {
 		iic = &per_cpu(iic, cpu);
-		iic->regs = find_iic(cpu);
+		setup_iic(cpu, iic);
 		if (iic->regs)
 			out_be64(&iic->regs->prio, 0xff);
 	}
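The new target_id encoding packs the Cell node number into the upper nibble and the per-thread interrupt unit ID into the lower one (0xe for thread 0, 0xf for thread 1); spu_base.c later replicates this value into the SPE interrupt routing register. A standalone sketch of the arithmetic (plain userspace C, assuming two hardware threads per node exactly as setup_iic() does):

/* Sketch only: reproduces the iic target_id computation. */
#include <stdio.h>

static unsigned char target_id(int cpu)
{
	int nodeid = cpu / 2;	/* two hardware threads per BE node */
	return (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)	/* prints 0x0e 0x0f 0x1e 0x1f */
		printf("cpu %d -> target 0x%02x\n", cpu, target_id(cpu));
	return 0;
}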
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 37d58e6fd0c..a14bd38791c 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -54,6 +54,7 @@ extern void iic_setup_cpu(void);
 extern void iic_local_enable(void);
 extern void iic_local_disable(void);
 
+extern u8 iic_get_target_id(int cpu);
 
 extern void spider_init_IRQ(void);
 extern int spider_get_irq(unsigned long int_pending);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 74f999b4ac9..46e7cb9c3e6 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -29,6 +29,8 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
 
 #include <asm/sections.h>
 #include <asm/iommu.h>
@@ -40,6 +42,7 @@
 #include <asm/abs_addr.h>
 #include <asm/system.h>
 #include <asm/ppc-pci.h>
+#include <asm/udbg.h>
 
 #include "iommu.h"
 
@@ -220,8 +223,6 @@ set_iopt_cache(void __iomem *base, unsigned long index,
 {
 	unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
 	unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
-	pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n",
-		index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag);
 
 	out_be64(p, val);
 	out_be64(&tags[index], tag);
@@ -248,67 +249,176 @@ set_iocmd_config(void __iomem *base)
 	out_be64(p, conf | IOCMD_CONF_TE);
 }
 
-/* FIXME: get these from the device tree */
-#define ioc_base	0x20000511000ull
-#define ioc_mmio_base	0x20000510000ull
-#define ioid		0x48a
-#define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */
-#define io_page_size	0x1000000
-
-static unsigned long map_iopt_entry(unsigned long address)
-{
-	switch (address >> 20) {
-	case 0x600:
-		address = 0x24020000000ull; /* spider i/o */
-		break;
-	default:
-		address += iopt_phys_offset;
-		break;
-	}
-
-	return get_iopt_entry(address, ioid, IOPT_PROT_RW);
-}
-
-static void iommu_bus_setup_null(struct pci_bus *b) { }
-static void iommu_dev_setup_null(struct pci_dev *d) { }
+static void enable_mapping(void __iomem *base, void __iomem *mmio_base)
+{
+	set_iocmd_config(base);
+	set_iost_origin(mmio_base);
+}
+
+static void iommu_dev_setup_null(struct pci_dev *d) { }
+static void iommu_bus_setup_null(struct pci_bus *b) { }
+
+struct cell_iommu {
+	unsigned long base;
+	unsigned long mmio_base;
+	void __iomem *mapped_base;
+	void __iomem *mapped_mmio_base;
+};
+
+static struct cell_iommu cell_iommus[NR_CPUS];
 
 /* initialize the iommu to support a simple linear mapping
  * for each DMA window used by any device. For now, we
  * happen to know that there is only one DMA window in use,
  * starting at iopt_phys_offset. */
-static void cell_map_iommu(void)
-{
-	unsigned long address;
-	void __iomem *base;
-	ioste ioste;
-	unsigned long index;
-
-	base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE);
-	pr_debug("%lx mapped to %p\n", ioc_base, base);
-	set_iocmd_config(base);
-	iounmap(base);
-
-	base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE);
-	pr_debug("%lx mapped to %p\n", ioc_mmio_base, base);
-
-	set_iost_origin(base);
-
-	for (address = 0; address < 0x100000000ul; address += io_page_size) {
-		ioste = get_iost_entry(0x10000000000ul, address, io_page_size);
-		if ((address & 0xfffffff) == 0) /* segment start */
-			set_iost_cache(base, address >> 28, ioste);
-		index = get_ioc_hash_1way(ioste, address);
-		pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
-			 address, index, ioste.val);
-		set_iopt_cache(base,
-			       get_ioc_hash_1way(ioste, address),
-			       get_ioc_tag(ioste, address),
-			       map_iopt_entry(address));
-	}
-	iounmap(base);
-}
-
+static void cell_do_map_iommu(struct cell_iommu *iommu,
+			      unsigned int ioid,
+			      unsigned long map_start,
+			      unsigned long map_size)
+{
+	unsigned long io_address, real_address;
+	void __iomem *ioc_base, *ioc_mmio_base;
+	ioste ioste;
+	unsigned long index;
+
+	/* we pretend the io page table was at a very high address */
+	const unsigned long fake_iopt = 0x10000000000ul;
+	const unsigned long io_page_size = 0x1000000; /* use 16M pages */
+	const unsigned long io_segment_size = 0x10000000; /* 256M */
+
+	ioc_base = iommu->mapped_base;
+	ioc_mmio_base = iommu->mapped_mmio_base;
+
+	for (real_address = 0, io_address = 0;
+	     io_address <= map_start + map_size;
+	     real_address += io_page_size, io_address += io_page_size) {
+		ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
+		if ((real_address % io_segment_size) == 0) /* segment start */
+			set_iost_cache(ioc_mmio_base,
+				       io_address >> 28, ioste);
+		index = get_ioc_hash_1way(ioste, io_address);
+		pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
+			 io_address, index, ioste.val);
+		set_iopt_cache(ioc_mmio_base,
+			       get_ioc_hash_1way(ioste, io_address),
+			       get_ioc_tag(ioste, io_address),
+			       get_iopt_entry(real_address - map_start, ioid, IOPT_PROT_RW));
+	}
+}
+
+static void iommu_devnode_setup(struct device_node *d)
+{
+	unsigned int *ioid;
+	unsigned long *dma_window, map_start, map_size, token;
+	struct cell_iommu *iommu;
+
+	ioid = (unsigned int *)get_property(d, "ioid", NULL);
+	if (!ioid)
+		pr_debug("No ioid entry found !\n");
+
+	dma_window = (unsigned long *)get_property(d, "ibm,dma-window", NULL);
+	if (!dma_window)
+		pr_debug("No ibm,dma-window entry found !\n");
+
+	map_start = dma_window[1];
+	map_size = dma_window[2];
+	token = dma_window[0] >> 32;
+
+	iommu = &cell_iommus[token];
+
+	cell_do_map_iommu(iommu, *ioid, map_start, map_size);
+}
+
+static void iommu_bus_setup(struct pci_bus *b)
+{
+	struct device_node *d = (struct device_node *)b->sysdata;
+	iommu_devnode_setup(d);
+}
+
+
+static int cell_map_iommu_hardcoded(int num_nodes)
+{
+	struct cell_iommu *iommu = NULL;
+
+	pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);
+
+	/* node 0 */
+	iommu = &cell_iommus[0];
+	iommu->mapped_base = __ioremap(0x20000511000, 0x1000, _PAGE_NO_CACHE);
+	iommu->mapped_mmio_base = __ioremap(0x20000510000, 0x1000, _PAGE_NO_CACHE);
+
+	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+
+	cell_do_map_iommu(iommu, 0x048a,
+			  0x20000000ul, 0x20000000ul);
+
+	if (num_nodes < 2)
+		return 0;
+
+	/* node 1 */
+	iommu = &cell_iommus[1];
+	iommu->mapped_base = __ioremap(0x30000511000, 0x1000, _PAGE_NO_CACHE);
+	iommu->mapped_mmio_base = __ioremap(0x30000510000, 0x1000, _PAGE_NO_CACHE);
+
+	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+
+	cell_do_map_iommu(iommu, 0x048a,
+			  0x20000000ul, 0x20000000ul);
+
+	return 0;
+}
+
+
+static int cell_map_iommu(void)
+{
+	unsigned int num_nodes = 0, *node_id;
+	unsigned long *base, *mmio_base;
+	struct device_node *dn;
+	struct cell_iommu *iommu = NULL;
+
+	/* determine number of nodes (=iommus) */
+	pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
+	for (dn = of_find_node_by_type(NULL, "cpu");
+	     dn;
+	     dn = of_find_node_by_type(dn, "cpu")) {
+		node_id = (unsigned int *)get_property(dn, "node-id", NULL);
+
+		if (num_nodes < *node_id)
+			num_nodes = *node_id;
+	}
+
+	num_nodes++;
+	pr_debug("%i found.\n", num_nodes);
+
+	/* map the iommu registers for each node */
+	pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__);
+	for (dn = of_find_node_by_type(NULL, "cpu");
+	     dn;
+	     dn = of_find_node_by_type(dn, "cpu")) {
+
+		node_id = (unsigned int *)get_property(dn, "node-id", NULL);
+		base = (unsigned long *)get_property(dn, "ioc-cache", NULL);
+		mmio_base = (unsigned long *)get_property(dn, "ioc-translation", NULL);
+
+		if (!base || !mmio_base || !node_id)
+			return cell_map_iommu_hardcoded(num_nodes);
+
+		iommu = &cell_iommus[*node_id];
+		iommu->base = *base;
+		iommu->mmio_base = *mmio_base;
+
+		iommu->mapped_base = __ioremap(*base, 0x1000, _PAGE_NO_CACHE);
+		iommu->mapped_mmio_base = __ioremap(*mmio_base, 0x1000, _PAGE_NO_CACHE);
+
+		enable_mapping(iommu->mapped_base,
+			       iommu->mapped_mmio_base);
+
+		/* everything else will be done in iommu_bus_setup */
+	}
+
+	return 1;
+}
+
 static void *cell_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flag)
 {
@@ -365,11 +475,28 @@ static int cell_dma_supported(struct device *dev, u64 mask)
 
 void cell_init_iommu(void)
 {
-	cell_map_iommu();
+	int setup_bus = 0;
 
-	/* Direct I/O, IOMMU off */
-	ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-	ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+	if (of_find_node_by_path("/mambo")) {
+		pr_info("Not using iommu on systemsim\n");
+	} else {
+
+		if (!(of_chosen &&
+		      get_property(of_chosen, "linux,iommu-off", NULL)))
+			setup_bus = cell_map_iommu();
+
+		if (setup_bus) {
+			pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__);
+			ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+			ppc_md.iommu_bus_setup = iommu_bus_setup;
+		} else {
+			pr_debug("%s: IOMMU mapping activated, "
+				 "no device action necessary\n", __FUNCTION__);
+			/* Direct I/O, IOMMU off */
+			ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+			ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+		}
+	}
 
 	pci_dma_ops.alloc_coherent = cell_alloc_coherent;
 	pci_dma_ops.free_coherent = cell_free_coherent;
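cell_do_map_iommu() walks the DMA window in 16 MB I/O pages and installs an IOST cache entry at every 256 MB segment boundary. A quick standalone check of how many page and segment entries the hardcoded 512 MB window starting at 512 MB generates (a userspace sketch mirroring the loop bounds above, not kernel code):

#include <stdio.h>

int main(void)
{
	const unsigned long io_page_size = 0x1000000;		/* 16 MB */
	const unsigned long io_segment_size = 0x10000000;	/* 256 MB */
	unsigned long map_start = 0x20000000ul;			/* 512 MB */
	unsigned long map_size = 0x20000000ul;			/* 512 MB */
	unsigned long io_address, pages = 0, segments = 0;

	/* same bounds as the kernel loop; io and real addresses move in
	 * lockstep there, so counting on io_address is equivalent */
	for (io_address = 0; io_address <= map_start + map_size;
	     io_address += io_page_size) {
		pages++;
		if ((io_address % io_segment_size) == 0)
			segments++;
	}
	printf("%lu pages, %lu segments\n", pages, segments);	/* 65, 5 */
	return 0;
}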
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
new file mode 100644
index 00000000000..85152544c15
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -0,0 +1,229 @@
+/*
+ * CBE Pervasive Monitor and Debug
+ *
+ * (C) Copyright IBM Corporation 2005
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ *          Michael N. Day (mnday@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/kallsyms.h>
+
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/pgtable.h>
+#include <asm/reg.h>
+
+#include "pervasive.h"
+
+static DEFINE_SPINLOCK(cbe_pervasive_lock);
+struct cbe_pervasive {
+	struct pmd_regs __iomem *regs;
+	unsigned int thread;
+};
+
+/* can't use per_cpu from setup_arch */
+static struct cbe_pervasive cbe_pervasive[NR_CPUS];
+
+static void __init cbe_enable_pause_zero(void)
+{
+	unsigned long thread_switch_control;
+	unsigned long temp_register;
+	struct cbe_pervasive *p;
+	int thread;
+
+	spin_lock_irq(&cbe_pervasive_lock);
+	p = &cbe_pervasive[smp_processor_id()];
+
+	if (!p->regs)
+		goto out;
+
+	pr_debug("Power Management: CPU %d\n", smp_processor_id());
+
+	/* Enable Pause(0) control bit */
+	temp_register = in_be64(&p->regs->pm_control);
+
+	out_be64(&p->regs->pm_control,
+		 temp_register | PMD_PAUSE_ZERO_CONTROL);
+
+	/* Enable DEC and EE interrupt request */
+	thread_switch_control  = mfspr(SPRN_TSC_CELL);
+	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
+
+	switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
+	case CTRL_CT0:
+		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
+		thread = 0;
+		break;
+	case CTRL_CT1:
+		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
+		thread = 1;
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown configuration\n",
+			__FUNCTION__);
+		thread = -1;
+		break;
+	}
+
+	if (p->thread != thread)
+		printk(KERN_WARNING "%s: device tree inconsistent, "
+			"cpu %i: %d/%d\n", __FUNCTION__,
+			smp_processor_id(),
+			p->thread, thread);
+
+	mtspr(SPRN_TSC_CELL, thread_switch_control);
+
+out:
+	spin_unlock_irq(&cbe_pervasive_lock);
+}
+
+static void cbe_idle(void)
+{
+	unsigned long ctrl;
+
+	cbe_enable_pause_zero();
+
+	while (1) {
+		if (!need_resched()) {
+			local_irq_disable();
+			while (!need_resched()) {
+				/* go into low thread priority */
+				HMT_low();
+
+				/*
+				 * atomically disable thread execution
+				 * and runlatch.
+				 * External and Decrementer exceptions
+				 * are still handled when the thread
+				 * is disabled but now enter in
+				 * cbe_system_reset_exception()
+				 */
+				ctrl = mfspr(SPRN_CTRLF);
+				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
+				mtspr(SPRN_CTRLT, ctrl);
+			}
+			/* restore thread prio */
+			HMT_medium();
+			local_irq_enable();
+		}
+
+		/*
+		 * turn runlatch on again before scheduling the
+		 * process we just woke up
+		 */
+		ppc64_runlatch_on();
+
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
+}
+
+int cbe_system_reset_exception(struct pt_regs *regs)
+{
+	switch (regs->msr & SRR1_WAKEMASK) {
+	case SRR1_WAKEEE:
+		do_IRQ(regs);
+		break;
+	case SRR1_WAKEDEC:
+		timer_interrupt(regs);
+		break;
+	case SRR1_WAKEMT:
+		/* no action required */
+		break;
+	default:
+		/* do system reset */
+		return 0;
+	}
+	/* everything handled */
+	return 1;
+}
+
+static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
+{
+	struct device_node *node;
+	unsigned int *int_servers;
+	char *addr;
+	unsigned long real_address;
+	unsigned int size;
+
+	struct pmd_regs __iomem *pmd_mmio_area;
+	int hardid, thread;
+	int proplen;
+
+	pmd_mmio_area = NULL;
+	hardid = get_hard_smp_processor_id(cpu);
+	for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) {
+		int_servers = (void *) get_property(node,
+				"ibm,ppc-interrupt-server#s", &proplen);
+		if (!int_servers) {
+			printk(KERN_WARNING "%s is missing the "
+				"ibm,ppc-interrupt-server#s property\n",
+				node->full_name);
+			continue;
+		}
+		for (thread = 0; thread < proplen / sizeof (int); thread++) {
+			if (hardid == int_servers[thread]) {
+				addr = get_property(node, "pervasive", NULL);
+				goto found;
+			}
+		}
+	}
+
+	printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu);
+	return -EINVAL;
+
+found:
+	real_address = *(unsigned long *) addr;
+	addr += sizeof (unsigned long);
+	size = *(unsigned int *) addr;
+
+	pr_debug("pervasive area for CPU %d at %lx, size %x\n",
+		 cpu, real_address, size);
+	p->regs = __ioremap(real_address, size, _PAGE_NO_CACHE);
+	p->thread = thread;
+	return 0;
+}
+
+void __init cell_pervasive_init(void)
+{
+	struct cbe_pervasive *p;
+	int cpu;
+	int ret;
+
+	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
+		return;
+
+	for_each_cpu(cpu) {
+		p = &cbe_pervasive[cpu];
+		ret = cbe_find_pmd_mmio(cpu, p);
+		if (ret)
+			return;
+	}
+
+	ppc_md.idle_loop = cbe_idle;
+	ppc_md.system_reset_exception = cbe_system_reset_exception;
+}
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h
new file mode 100644
index 00000000000..da1fb85ca3e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pervasive.h
@@ -0,0 +1,62 @@
+/*
+ * Cell Pervasive Monitor and Debug interface and HW structures
+ *
+ * (C) Copyright IBM Corporation 2005
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ *          David J. Erb (djerb@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#ifndef PERVASIVE_H
+#define PERVASIVE_H
+
+struct pmd_regs {
+	u8 pad_0x0000_0x0800[0x0800 - 0x0000];			/* 0x0000 */
+
+	/* Thermal Sensor Registers */
+	u64 ts_ctsr1;						/* 0x0800 */
+	u64 ts_ctsr2;						/* 0x0808 */
+	u64 ts_mtsr1;						/* 0x0810 */
+	u64 ts_mtsr2;						/* 0x0818 */
+	u64 ts_itr1;						/* 0x0820 */
+	u64 ts_itr2;						/* 0x0828 */
+	u64 ts_gitr;						/* 0x0830 */
+	u64 ts_isr;						/* 0x0838 */
+	u64 ts_imr;						/* 0x0840 */
+	u64 tm_cr1;						/* 0x0848 */
+	u64 tm_cr2;						/* 0x0850 */
+	u64 tm_simr;						/* 0x0858 */
+	u64 tm_tpr;						/* 0x0860 */
+	u64 tm_str1;						/* 0x0868 */
+	u64 tm_str2;						/* 0x0870 */
+	u64 tm_tsr;						/* 0x0878 */
+
+	/* Power Management */
+	u64 pm_control;						/* 0x0880 */
+#define PMD_PAUSE_ZERO_CONTROL	0x10000
+	u64 pm_status;						/* 0x0888 */
+
+	/* Time Base Register */
+	u64 tbr;						/* 0x0890 */
+
+	u8 pad_0x0898_0x1000 [0x1000 - 0x0898];			/* 0x0898 */
+};
+
+void __init cell_pervasive_init(void);
+
+#endif
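The pad arrays fix the registers at the absolute offsets given in the trailing comments. A compile-time layout check along these lines could be dropped into pervasive.c (a sketch using the kernel's BUILD_BUG_ON; it is not part of this patch):

/* Sketch: verify struct pmd_regs matches the documented offsets. */
static inline void pmd_regs_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct pmd_regs, ts_ctsr1)   != 0x0800);
	BUILD_BUG_ON(offsetof(struct pmd_regs, pm_control) != 0x0880);
	BUILD_BUG_ON(offsetof(struct pmd_regs, pm_status)  != 0x0888);
	BUILD_BUG_ON(offsetof(struct pmd_regs, tbr)        != 0x0890);
	BUILD_BUG_ON(sizeof(struct pmd_regs)               != 0x1000);
}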
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 9a495634d0c..18e25e65c04 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -33,6 +33,7 @@
 #include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/io.h>
+#include <asm/kexec.h>
 #include <asm/pgtable.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -48,6 +49,7 @@
 
 #include "interrupt.h"
 #include "iommu.h"
+#include "pervasive.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -67,6 +69,77 @@ void cell_show_cpuinfo(struct seq_file *m)
 	of_node_put(root);
 }
 
+#ifdef CONFIG_SPARSEMEM
+static int __init find_spu_node_id(struct device_node *spe)
+{
+	unsigned int *id;
+#ifdef CONFIG_NUMA
+	struct device_node *cpu;
+	cpu = spe->parent->parent;
+	id = (unsigned int *)get_property(cpu, "node-id", NULL);
+#else
+	id = NULL;
+#endif
+	return id ? *id : 0;
+}
+
+static void __init cell_spuprop_present(struct device_node *spe,
+					const char *prop, int early)
+{
+	struct address_prop {
+		unsigned long address;
+		unsigned int len;
+	} __attribute__((packed)) *p;
+	int proplen;
+
+	unsigned long start_pfn, end_pfn, pfn;
+	int node_id;
+
+	p = (void *)get_property(spe, prop, &proplen);
+	WARN_ON(proplen != sizeof (*p));
+
+	node_id = find_spu_node_id(spe);
+
+	start_pfn = p->address >> PAGE_SHIFT;
+	end_pfn = (p->address + p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	/* We need to call memory_present *before* the call to sparse_init,
+	   but we can initialize the page structs only *after* that call.
+	   Thus, we're being called twice. */
+	if (early)
+		memory_present(node_id, start_pfn, end_pfn);
+	else {
+		/* As the pages backing SPU LS and I/O are outside the range
+		   of regular memory, their page structs were not initialized
+		   by free_area_init. Do it here instead. */
+		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+			struct page *page = pfn_to_page(pfn);
+			set_page_links(page, ZONE_DMA, node_id, pfn);
+			set_page_count(page, 1);
+			reset_page_mapcount(page);
+			SetPageReserved(page);
+			INIT_LIST_HEAD(&page->lru);
+		}
+	}
+}
+
+static void __init cell_spumem_init(int early)
+{
+	struct device_node *node;
+	for (node = of_find_node_by_type(NULL, "spe");
+	     node; node = of_find_node_by_type(node, "spe")) {
+		cell_spuprop_present(node, "local-store", early);
+		cell_spuprop_present(node, "problem", early);
+		cell_spuprop_present(node, "priv1", early);
+		cell_spuprop_present(node, "priv2", early);
+	}
+}
+#else
+static void __init cell_spumem_init(int early)
+{
+}
+#endif
+
 static void cell_progress(char *s, unsigned short hex)
 {
 	printk("*** %04x : %s\n", hex, s ? s : "");
@@ -93,11 +166,14 @@ static void __init cell_setup_arch(void)
 	init_pci_config_tokens();
 	find_and_init_phbs();
 	spider_init_IRQ();
+	cell_pervasive_init();
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
 
 	mmio_nvram_init();
+
+	cell_spumem_init(0);
 }
 
 /*
@@ -113,6 +189,8 @@ static void __init cell_init_early(void)
 
 	ppc64_interrupt_controller = IC_CELL_PIC;
 
+	cell_spumem_init(1);
+
 	DBG(" <- cell_init_early()\n");
 }
 
@@ -125,6 +203,15 @@ static int __init cell_probe(int platform)
 	return 1;
 }
 
+/*
+ * Cell has no legacy IO; anything calling this function has to
+ * fail or bad things will happen
+ */
+static int cell_check_legacy_ioport(unsigned int baseport)
+{
+	return -ENODEV;
+}
+
 struct machdep_calls __initdata cell_md = {
 	.probe			= cell_probe,
 	.setup_arch		= cell_setup_arch,
@@ -137,5 +224,11 @@ struct machdep_calls __initdata cell_md = {
 	.get_rtc_time		= rtas_get_rtc_time,
 	.set_rtc_time		= rtas_set_rtc_time,
 	.calibrate_decr		= generic_calibrate_decr,
+	.check_legacy_ioport	= cell_check_legacy_ioport,
 	.progress		= cell_progress,
+#ifdef CONFIG_KEXEC
+	.machine_kexec		= default_machine_kexec,
+	.machine_kexec_prepare	= default_machine_kexec_prepare,
+	.machine_crash_shutdown	= default_machine_crash_shutdown,
+#endif
 };
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
new file mode 100644
index 00000000000..d75ae03df68
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -0,0 +1,711 @@
+/*
+ * Low-level SPU handling
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#undef DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/semaphore.h>
+#include <asm/spu.h>
+#include <asm/mmu_context.h>
+
+#include "interrupt.h"
+
+static int __spu_trap_invalid_dma(struct spu *spu)
+{
+	pr_debug("%s\n", __FUNCTION__);
+	force_sig(SIGBUS, /* info, */ current);
+	return 0;
+}
+
+static int __spu_trap_dma_align(struct spu *spu)
+{
+	pr_debug("%s\n", __FUNCTION__);
+	force_sig(SIGBUS, /* info, */ current);
+	return 0;
+}
+
+static int __spu_trap_error(struct spu *spu)
+{
+	pr_debug("%s\n", __FUNCTION__);
+	force_sig(SIGILL, /* info, */ current);
+	return 0;
+}
+
+static void spu_restart_dma(struct spu *spu)
+{
+	struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
+		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+}
+
+static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+{
+	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	struct mm_struct *mm = spu->mm;
+	u64 esid, vsid;
+
+	pr_debug("%s\n", __FUNCTION__);
+
+	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
+		/* SLBs are pre-loaded for context switch, so
+		 * we should never get here!
+		 */
+		printk("%s: invalid access during switch!\n", __func__);
+		return 1;
+	}
+	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+		/* Future: support kernel segments so that drivers
+		 * can use SPUs.
+		 */
+		pr_debug("invalid region access at %016lx\n", ea);
+		return 1;
+	}
+
+	esid = (ea & ESID_MASK) | SLB_ESID_V;
+	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
+	if (in_hugepage_area(mm->context, ea))
+		vsid |= SLB_VSID_L;
+
+	out_be64(&priv2->slb_index_W, spu->slb_replace);
+	out_be64(&priv2->slb_vsid_RW, vsid);
+	out_be64(&priv2->slb_esid_RW, esid);
+
+	spu->slb_replace++;
+	if (spu->slb_replace >= 8)
+		spu->slb_replace = 0;
+
+	spu_restart_dma(spu);
+
+	return 0;
+}
+
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
+static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
+{
+	pr_debug("%s\n", __FUNCTION__);
+
+	/* Handle kernel space hash faults immediately.
+	   User hash faults need to be deferred to process context. */
+	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
+	    && REGION_ID(ea) != USER_REGION_ID
+	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
+		spu_restart_dma(spu);
+		return 0;
+	}
+
+	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
+		printk("%s: invalid access during switch!\n", __func__);
+		return 1;
+	}
+
+	spu->dar = ea;
+	spu->dsisr = dsisr;
+	mb();
+	if (spu->stop_callback)
+		spu->stop_callback(spu);
+	return 0;
+}
+
+static int __spu_trap_mailbox(struct spu *spu)
+{
+	if (spu->ibox_callback)
+		spu->ibox_callback(spu);
+
+	/* atomically disable SPU mailbox interrupts */
+	spin_lock(&spu->register_lock);
+	spu_int_mask_and(spu, 2, ~0x1);
+	spin_unlock(&spu->register_lock);
+	return 0;
+}
+
+static int __spu_trap_stop(struct spu *spu)
+{
+	pr_debug("%s\n", __FUNCTION__);
+	spu->stop_code = in_be32(&spu->problem->spu_status_R);
+	if (spu->stop_callback)
+		spu->stop_callback(spu);
+	return 0;
+}
+
+static int __spu_trap_halt(struct spu *spu)
+{
+	pr_debug("%s\n", __FUNCTION__);
+	spu->stop_code = in_be32(&spu->problem->spu_status_R);
+	if (spu->stop_callback)
+		spu->stop_callback(spu);
+	return 0;
+}
+
+static int __spu_trap_tag_group(struct spu *spu)
+{
+	pr_debug("%s\n", __FUNCTION__);
+	/* wake_up(&spu->dma_wq); */
+	return 0;
+}
+
+static int __spu_trap_spubox(struct spu *spu)
+{
+	if (spu->wbox_callback)
+		spu->wbox_callback(spu);
+
+	/* atomically disable SPU mailbox interrupts */
+	spin_lock(&spu->register_lock);
+	spu_int_mask_and(spu, 2, ~0x10);
+	spin_unlock(&spu->register_lock);
+	return 0;
+}
+
+static irqreturn_t
+spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
+{
+	struct spu *spu;
+
+	spu = data;
+	spu->class_0_pending = 1;
+	if (spu->stop_callback)
+		spu->stop_callback(spu);
+
+	return IRQ_HANDLED;
+}
+
+int
+spu_irq_class_0_bottom(struct spu *spu)
+{
+	unsigned long stat, mask;
+
+	spu->class_0_pending = 0;
+
+	mask = spu_int_mask_get(spu, 0);
+	stat = spu_int_stat_get(spu, 0);
+
+	stat &= mask;
+
+	if (stat & 1) /* invalid MFC DMA */
+		__spu_trap_invalid_dma(spu);
+
+	if (stat & 2) /* invalid DMA alignment */
+		__spu_trap_dma_align(spu);
+
+	if (stat & 4) /* error on SPU */
+		__spu_trap_error(spu);
+
+	spu_int_stat_clear(spu, 0, stat);
+
+	return (stat & 0x7) ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
+
+static irqreturn_t
+spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
+{
+	struct spu *spu;
+	unsigned long stat, mask, dar, dsisr;
+
+	spu = data;
+
+	/* atomically read & clear class1 status. */
+	spin_lock(&spu->register_lock);
+	mask = spu_int_mask_get(spu, 1);
+	stat = spu_int_stat_get(spu, 1) & mask;
+	dar = spu_mfc_dar_get(spu);
+	dsisr = spu_mfc_dsisr_get(spu);
+	if (stat & 2) /* mapping fault */
+		spu_mfc_dsisr_set(spu, 0ul);
+	spu_int_stat_clear(spu, 1, stat);
+	spin_unlock(&spu->register_lock);
+
+	if (stat & 1) /* segment fault */
+		__spu_trap_data_seg(spu, dar);
+
+	if (stat & 2) { /* mapping fault */
+		__spu_trap_data_map(spu, dar, dsisr);
+	}
+
+	if (stat & 4) /* ls compare & suspend on get */
+		;
+
+	if (stat & 8) /* ls compare & suspend on put */
+		;
+
+	return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
+
+static irqreturn_t
+spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
+{
+	struct spu *spu;
+	unsigned long stat;
+	unsigned long mask;
+
+	spu = data;
+	stat = spu_int_stat_get(spu, 2);
+	mask = spu_int_mask_get(spu, 2);
+
+	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
+
+	stat &= mask;
+
+	if (stat & 1) /* PPC core mailbox */
+		__spu_trap_mailbox(spu);
+
+	if (stat & 2) /* SPU stop-and-signal */
+		__spu_trap_stop(spu);
+
+	if (stat & 4) /* SPU halted */
+		__spu_trap_halt(spu);
+
+	if (stat & 8) /* DMA tag group complete */
+		__spu_trap_tag_group(spu);
+
+	if (stat & 0x10) /* SPU mailbox threshold */
+		__spu_trap_spubox(spu);
+
+	spu_int_stat_clear(spu, 2, stat);
+	return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int
+spu_request_irqs(struct spu *spu)
+{
+	int ret;
+	int irq_base;
+
+	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
+
+	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
+	ret = request_irq(irq_base + spu->isrc,
+		 spu_irq_class_0, 0, spu->irq_c0, spu);
+	if (ret)
+		goto out;
+
+	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
+	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
+		 spu_irq_class_1, 0, spu->irq_c1, spu);
+	if (ret)
+		goto out1;
+
+	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
+	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
+		 spu_irq_class_2, 0, spu->irq_c2, spu);
+	if (ret)
+		goto out2;
+	goto out;
+
+out2:
+	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
+out1:
+	free_irq(irq_base + spu->isrc, spu);
+out:
+	return ret;
+}
+
+static void
+spu_free_irqs(struct spu *spu)
+{
+	int irq_base;
+
+	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
+
+	free_irq(irq_base + spu->isrc, spu);
+	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
+	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
+}
+
+static LIST_HEAD(spu_list);
+static DECLARE_MUTEX(spu_mutex);
+
+static void spu_init_channels(struct spu *spu)
+{
+	static const struct {
+		 unsigned channel;
+		 unsigned count;
+	} zero_list[] = {
+		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
+		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
+	}, count_list[] = {
+		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
+		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
+		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
+	};
+	struct spu_priv2 __iomem *priv2;
+	int i;
+
+	priv2 = spu->priv2;
+
+	/* initialize all channel data to zero */
+	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
+		int count;
+
+		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
+		for (count = 0; count < zero_list[i].count; count++)
+			out_be64(&priv2->spu_chnldata_RW, 0);
+	}
+
+	/* initialize channel counts to meaningful values */
+	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
+		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
+		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
+	}
+}
+
+struct spu *spu_alloc(void)
+{
+	struct spu *spu;
+
+	down(&spu_mutex);
+	if (!list_empty(&spu_list)) {
+		spu = list_entry(spu_list.next, struct spu, list);
+		list_del_init(&spu->list);
+		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
+	} else {
+		pr_debug("No SPU left\n");
+		spu = NULL;
+	}
+	up(&spu_mutex);
+
+	if (spu)
+		spu_init_channels(spu);
+
+	return spu;
+}
+EXPORT_SYMBOL_GPL(spu_alloc);
+
+void spu_free(struct spu *spu)
+{
+	down(&spu_mutex);
+	list_add_tail(&spu->list, &spu_list);
+	up(&spu_mutex);
+}
+EXPORT_SYMBOL_GPL(spu_free);
+
+static int spu_handle_mm_fault(struct spu *spu)
+{
+	struct mm_struct *mm = spu->mm;
+	struct vm_area_struct *vma;
+	u64 ea, dsisr, is_write;
+	int ret;
+
+	ea = spu->dar;
+	dsisr = spu->dsisr;
+#if 0
+	if (!IS_VALID_EA(ea)) {
+		return -EFAULT;
+	}
+#endif /* XXX */
+	if (mm == NULL) {
+		return -EFAULT;
+	}
+	if (mm->pgd == NULL) {
+		return -EFAULT;
+	}
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ea);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= ea)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+#if 0
+	if (expand_stack(vma, ea))
+		goto bad_area;
+#endif /* XXX */
+good_area:
+	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+	ret = 0;
+	switch (handle_mm_fault(mm, vma, ea, is_write)) {
+	case VM_FAULT_MINOR:
+		current->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		current->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		ret = -EFAULT;
+		goto bad_area;
+	case VM_FAULT_OOM:
+		ret = -ENOMEM;
+		goto bad_area;
+	default:
+		BUG();
+	}
+	up_read(&mm->mmap_sem);
+	return ret;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+	return -EFAULT;
+}
+
+int spu_irq_class_1_bottom(struct spu *spu)
+{
+	u64 ea, dsisr, access, error = 0UL;
+	int ret = 0;
+
+	ea = spu->dar;
+	dsisr = spu->dsisr;
+	if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
+		access = (_PAGE_PRESENT | _PAGE_USER);
+		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
+		if (hash_page(ea, access, 0x300) != 0)
+			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
+	}
+	if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
+	    (dsisr & MFC_DSISR_ACCESS_DENIED)) {
+		if ((ret = spu_handle_mm_fault(spu)) != 0)
+			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
+		else
+			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
+	}
+	spu->dar = 0UL;
+	spu->dsisr = 0UL;
+	if (!error) {
+		spu_restart_dma(spu);
+	} else {
+		__spu_trap_invalid_dma(spu);
+	}
+	return ret;
+}
+
+void spu_irq_setaffinity(struct spu *spu, int cpu)
+{
+	u64 target = iic_get_target_id(cpu);
+	u64 route = target << 48 | target << 32 | target << 16;
+	spu_int_route_set(spu, route);
+}
+EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
+
+static void __iomem * __init map_spe_prop(struct device_node *n,
+		 const char *name)
+{
+	struct address_prop {
+		unsigned long address;
+		unsigned int len;
+	} __attribute__((packed)) *prop;
+
+	void *p;
+	int proplen;
+
+	p = get_property(n, name, &proplen);
+	if (proplen != sizeof (struct address_prop))
+		return NULL;
+
+	prop = p;
+
+	return ioremap(prop->address, prop->len);
+}
+
+static void spu_unmap(struct spu *spu)
+{
+	iounmap(spu->priv2);
+	iounmap(spu->priv1);
+	iounmap(spu->problem);
+	iounmap((u8 __iomem *)spu->local_store);
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *spe)
+{
+	char *prop;
+	int ret;
+
+	ret = -ENODEV;
+	prop = get_property(spe, "isrc", NULL);
+	if (!prop)
+		goto out;
+	spu->isrc = *(unsigned int *)prop;
+
+	spu->name = get_property(spe, "name", NULL);
+	if (!spu->name)
+		goto out;
+
+	prop = get_property(spe, "local-store", NULL);
+	if (!prop)
+		goto out;
+	spu->local_store_phys = *(unsigned long *)prop;
+
+	/* we use local store as ram, not io memory */
+	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
+	if (!spu->local_store)
+		goto out;
+
+	spu->problem = map_spe_prop(spe, "problem");
+	if (!spu->problem)
+		goto out_unmap;
+
+	spu->priv1 = map_spe_prop(spe, "priv1");
+	/* priv1 is not available on a hypervisor */
+
+	spu->priv2 = map_spe_prop(spe, "priv2");
+	if (!spu->priv2)
+		goto out_unmap;
+	ret = 0;
+	goto out;
+
+out_unmap:
+	spu_unmap(spu);
+out:
+	return ret;
+}
+
+static int __init find_spu_node_id(struct device_node *spe)
+{
+	unsigned int *id;
+	struct device_node *cpu;
+
+	cpu = spe->parent->parent;
+	id = (unsigned int *)get_property(cpu, "node-id", NULL);
+
+	return id ? *id : 0;
+}
+
+static int __init create_spu(struct device_node *spe)
+{
+	struct spu *spu;
+	int ret;
+	static int number;
+
+	ret = -ENOMEM;
+	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
+	if (!spu)
+		goto out;
+
+	ret = spu_map_device(spu, spe);
+	if (ret)
+		goto out_free;
+
+	spu->node = find_spu_node_id(spe);
+	spu->stop_code = 0;
+	spu->slb_replace = 0;
+	spu->mm = NULL;
+	spu->ctx = NULL;
+	spu->rq = NULL;
+	spu->pid = 0;
+	spu->class_0_pending = 0;
+	spu->flags = 0UL;
+	spu->dar = 0UL;
+	spu->dsisr = 0UL;
+	spin_lock_init(&spu->register_lock);
+
+	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
+	spu_mfc_sr1_set(spu, 0x33);
+
+	spu->ibox_callback = NULL;
+	spu->wbox_callback = NULL;
+	spu->stop_callback = NULL;
+
+	down(&spu_mutex);
+	spu->number = number++;
+	ret = spu_request_irqs(spu);
+	if (ret)
+		goto out_unmap;
+
+	list_add(&spu->list, &spu_list);
+	up(&spu_mutex);
+
+	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
+		spu->name, spu->isrc, spu->local_store,
+		spu->problem, spu->priv1, spu->priv2, spu->number);
+	goto out;
+
+out_unmap:
+	up(&spu_mutex);
+	spu_unmap(spu);
+out_free:
+	kfree(spu);
+out:
+	return ret;
+}
+
+static void destroy_spu(struct spu *spu)
+{
+	list_del_init(&spu->list);
+
+	spu_free_irqs(spu);
+	spu_unmap(spu);
+	kfree(spu);
+}
+
+static void cleanup_spu_base(void)
+{
+	struct spu *spu, *tmp;
+	down(&spu_mutex);
+	list_for_each_entry_safe(spu, tmp, &spu_list, list)
+		destroy_spu(spu);
+	up(&spu_mutex);
+}
+module_exit(cleanup_spu_base);
+
+static int __init init_spu_base(void)
+{
+	struct device_node *node;
+	int ret;
+
+	ret = -ENODEV;
+	for (node = of_find_node_by_type(NULL, "spe");
+	     node; node = of_find_node_by_type(node, "spe")) {
+		ret = create_spu(node);
+		if (ret) {
+			printk(KERN_WARNING "%s: Error initializing %s\n",
+				__FUNCTION__, node->name);
+			cleanup_spu_base();
+			break;
+		}
+	}
+	/* in some old firmware versions, the spe is called 'spc', so we
+	   look for that as well */
+	for (node = of_find_node_by_type(NULL, "spc");
+	     node; node = of_find_node_by_type(node, "spc")) {
+		ret = create_spu(node);
+		if (ret) {
+			printk(KERN_WARNING "%s: Error initializing %s\n",
+				__FUNCTION__, node->name);
+			cleanup_spu_base();
+			break;
+		}
+	}
+	return ret;
+}
+module_init(init_spu_base);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
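A client of this base layer takes an SPU off the free list with spu_alloc(), points the three callbacks at its own handlers, and hands the SPU back with spu_free(); spufs is the only such client in this patch. Schematically (the handler names below are placeholders, not functions from the patch):

/* Sketch of spu_base API usage; my_*_handler are assumed callbacks. */
static struct spu *example_grab_spu(void)
{
	struct spu *spu;

	spu = spu_alloc();	/* NULL when no SPU is free */
	if (!spu)
		return NULL;

	spu->ibox_callback = my_ibox_handler;
	spu->wbox_callback = my_wbox_handler;
	spu->stop_callback = my_stop_handler;
	spu_irq_setaffinity(spu, smp_processor_id());
	return spu;
}

static void example_release_spu(struct spu *spu)
{
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu_free(spu);
}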
diff --git a/arch/powerpc/platforms/cell/spu_priv1.c b/arch/powerpc/platforms/cell/spu_priv1.c
new file mode 100644
index 00000000000..b2656421c7b
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1.c
@@ -0,0 +1,133 @@
+/*
+ * access to SPU privileged registers
+ */
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/spu.h>
+
+void spu_int_mask_and(struct spu *spu, int class, u64 mask)
+{
+	u64 old_mask;
+
+	old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
+	out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_and);
+
+void spu_int_mask_or(struct spu *spu, int class, u64 mask)
+{
+	u64 old_mask;
+
+	old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
+	out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_or);
+
+void spu_int_mask_set(struct spu *spu, int class, u64 mask)
+{
+	out_be64(&spu->priv1->int_mask_RW[class], mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_set);
+
+u64 spu_int_mask_get(struct spu *spu, int class)
+{
+	return in_be64(&spu->priv1->int_mask_RW[class]);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_get);
+
+void spu_int_stat_clear(struct spu *spu, int class, u64 stat)
+{
+	out_be64(&spu->priv1->int_stat_RW[class], stat);
+}
+EXPORT_SYMBOL_GPL(spu_int_stat_clear);
+
+u64 spu_int_stat_get(struct spu *spu, int class)
+{
+	return in_be64(&spu->priv1->int_stat_RW[class]);
+}
+EXPORT_SYMBOL_GPL(spu_int_stat_get);
+
+void spu_int_route_set(struct spu *spu, u64 route)
+{
+	out_be64(&spu->priv1->int_route_RW, route);
+}
+EXPORT_SYMBOL_GPL(spu_int_route_set);
+
+u64 spu_mfc_dar_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->mfc_dar_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dar_get);
+
+u64 spu_mfc_dsisr_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->mfc_dsisr_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dsisr_get);
+
+void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr)
+{
+	out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dsisr_set);
+
+void spu_mfc_sdr_set(struct spu *spu, u64 sdr)
+{
+	out_be64(&spu->priv1->mfc_sdr_RW, sdr);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sdr_set);
+
+void spu_mfc_sr1_set(struct spu *spu, u64 sr1)
+{
+	out_be64(&spu->priv1->mfc_sr1_RW, sr1);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sr1_set);
+
+u64 spu_mfc_sr1_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->mfc_sr1_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sr1_get);
+
+void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
+{
+	out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_set);
+
+u64 spu_mfc_tclass_id_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->mfc_tclass_id_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_get);
+
+void spu_tlb_invalidate(struct spu *spu)
+{
+	out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
+}
+EXPORT_SYMBOL_GPL(spu_tlb_invalidate);
+
+void spu_resource_allocation_groupID_set(struct spu *spu, u64 id)
+{
+	out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_set);
+
+u64 spu_resource_allocation_groupID_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->resource_allocation_groupID_RW);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_get);
+
+void spu_resource_allocation_enable_set(struct spu *spu, u64 enable)
+{
+	out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_set);
+
+u64 spu_resource_allocation_enable_get(struct spu *spu)
+{
+	return in_be64(&spu->priv1->resource_allocation_enable_RW);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_get);
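All of these helpers are thin in_be64()/out_be64() wrappers so that callers go through one interface instead of touching spu->priv1 directly. Typical usage, repeating the mailbox-masking pattern from __spu_trap_mailbox() in spu_base.c (shown for illustration only):

/* Illustration: atomically clear bit 0 of the class 2 interrupt mask. */
static void example_disable_ibox_intr(struct spu *spu)
{
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x1ul);
	spin_unlock(&spu->register_lock);
}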
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
new file mode 100644
index 00000000000..261b507a901
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -0,0 +1,88 @@
+/*
+ * SPU file system -- system call stubs
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/spu.h>
+
+struct spufs_calls spufs_calls = {
+	.owner = NULL,
+};
+
+/* These stub syscalls are needed to have the actual implementation
+ * within a loadable module. When spufs is built into the kernel,
+ * this file is not used and the syscalls directly enter the fs code */
+
+asmlinkage long sys_spu_create(const char __user *name,
+		unsigned int flags, mode_t mode)
+{
+	long ret;
+	struct module *owner = spufs_calls.owner;
+
+	ret = -ENOSYS;
+	if (owner && try_module_get(owner)) {
+		ret = spufs_calls.create_thread(name, flags, mode);
+		module_put(owner);
+	}
+	return ret;
+}
+
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
+{
+	long ret;
+	struct file *filp;
+	int fput_needed;
+	struct module *owner = spufs_calls.owner;
+
+	ret = -ENOSYS;
+	if (owner && try_module_get(owner)) {
+		ret = -EBADF;
+		filp = fget_light(fd, &fput_needed);
+		if (filp) {
+			ret = spufs_calls.spu_run(filp, unpc, ustatus);
+			fput_light(filp, fput_needed);
+		}
+		module_put(owner);
+	}
+	return ret;
+}
+
+int register_spu_syscalls(struct spufs_calls *calls)
+{
+	if (spufs_calls.owner)
+		return -EBUSY;
+
+	spufs_calls.create_thread = calls->create_thread;
+	spufs_calls.spu_run = calls->spu_run;
+	smp_mb();
+	spufs_calls.owner = calls->owner;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(register_spu_syscalls);
+
+void unregister_spu_syscalls(struct spufs_calls *calls)
+{
+	BUG_ON(spufs_calls.owner != calls->owner);
+	spufs_calls.owner = NULL;
+}
+EXPORT_SYMBOL_GPL(unregister_spu_syscalls);
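On the other side of this interface, the spufs module fills in a struct spufs_calls and registers it at init time; that is what makes the stubs above return something other than -ENOSYS. A minimal sketch of the registration (the real code lives in the spufs module, not in this hunk; the two handler names are assumptions for illustration):

/* Sketch of the module-side registration the stubs expect. */
static struct spufs_calls my_spufs_calls = {
	.create_thread	= spufs_create_thread,	/* assumed implementation */
	.spu_run	= spufs_run,		/* assumed implementation */
	.owner		= THIS_MODULE,
};

static int __init my_spufs_init(void)
{
	return register_spu_syscalls(&my_spufs_calls);
}

static void __exit my_spufs_exit(void)
{
	unregister_spu_syscalls(&my_spufs_calls);
}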
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
new file mode 100644
index 00000000000..a7cddf40e3d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -0,0 +1,54 @@
1obj-$(CONFIG_SPU_FS) += spufs.o
2spufs-y += inode.o file.o context.o switch.o syscalls.o
3spufs-y += sched.o backing_ops.o hw_ops.o run.o
4
5# Rules to build switch.o with the help of SPU tool chain
6SPU_CROSS := spu-
7SPU_CC := $(SPU_CROSS)gcc
8SPU_AS := $(SPU_CROSS)gcc
9SPU_LD := $(SPU_CROSS)ld
10SPU_OBJCOPY := $(SPU_CROSS)objcopy
11SPU_CFLAGS := -O2 -Wall -I$(srctree)/include -I$(objtree)/include2
12SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include -I$(objtree)/include2
13SPU_LDFLAGS := -N -Ttext=0x0
14
15$(obj)/switch.o: $(obj)/spu_save_dump.h $(obj)/spu_restore_dump.h
16
17# Compile SPU files
18 cmd_spu_cc = $(SPU_CC) $(SPU_CFLAGS) -c -o $@ $<
19quiet_cmd_spu_cc = SPU_CC $@
20$(obj)/spu_%.o: $(src)/spu_%.c
21 $(call if_changed,spu_cc)
22
23# Assemble SPU files
24 cmd_spu_as = $(SPU_AS) $(SPU_AFLAGS) -o $@ $<
25quiet_cmd_spu_as = SPU_AS $@
26$(obj)/spu_%.o: $(src)/spu_%.S
27 $(call if_changed,spu_as)
28
29# Link SPU Executables
30 cmd_spu_ld = $(SPU_LD) $(SPU_LDFLAGS) -o $@ $^
31quiet_cmd_spu_ld = SPU_LD $@
32$(obj)/spu_%: $(obj)/spu_%_crt0.o $(obj)/spu_%.o
33 $(call if_changed,spu_ld)
34
35# Copy into binary format
36 cmd_spu_objcopy = $(SPU_OBJCOPY) -O binary $< $@
37quiet_cmd_spu_objcopy = OBJCOPY $@
38$(obj)/spu_%.bin: $(src)/spu_%
39 $(call if_changed,spu_objcopy)
40
41# create C code from ELF executable
42cmd_hexdump = ( \
43 echo "/*" ; \
44 echo " * $*_dump.h: Copyright (C) 2005 IBM." ; \
45 echo " * Hex-dump auto generated from $*.c." ; \
46 echo " * Do not edit!" ; \
47 echo " */" ; \
48 echo "static unsigned int $*_code[] __page_aligned = {" ; \
49 hexdump -v -e '"0x" 4/1 "%02x" "," "\n"' $< ; \
50 echo "};" ; \
51 ) > $@
52quiet_cmd_hexdump = HEXDUMP $@
53$(obj)/%_dump.h: $(obj)/%.bin
54 $(call if_changed,hexdump)
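
For reference, the hexdump recipe above turns the objcopy'd flat binary into a
C array; the generated spu_save_dump.h and spu_restore_dump.h (also shipped
pre-built as *_dump.h_shipped) come out in this shape. The words below are
illustrative; the real values are whatever the .bin contains:

    /*
     * spu_save_dump.h: Copyright (C) 2005 IBM.
     * Hex-dump auto generated from spu_save.c.
     * Do not edit!
     */
    static unsigned int spu_save_code[] __page_aligned = {
    0x42000003,
    0x35000000,
    /* ... one 32-bit word per 4 bytes of spu_save.bin ... */
    };
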
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
new file mode 100644
index 00000000000..a5c489a53c6
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -0,0 +1,308 @@
1/* backing_ops.c - query/set operations on saved SPU context.
2 *
3 * Copyright (C) IBM 2005
4 * Author: Mark Nutter <mnutter@us.ibm.com>
5 *
6 * These register operations allow SPUFS to operate on saved
7 * SPU contexts rather than hardware.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/config.h>
25#include <linux/module.h>
26#include <linux/errno.h>
27#include <linux/sched.h>
28#include <linux/kernel.h>
29#include <linux/mm.h>
30#include <linux/vmalloc.h>
31#include <linux/smp.h>
32#include <linux/smp_lock.h>
33#include <linux/stddef.h>
34#include <linux/unistd.h>
35#include <linux/poll.h>
36
37#include <asm/io.h>
38#include <asm/spu.h>
39#include <asm/spu_csa.h>
40#include <asm/mmu_context.h>
41#include "spufs.h"
42
43/*
44 * Reads/writes to various problem and priv2 registers require
45 * state changes, i.e. generate SPU events, modify channel
46 * counts, etc.
47 */
48
49static void gen_spu_event(struct spu_context *ctx, u32 event)
50{
51 u64 ch0_cnt;
52 u64 ch0_data;
53 u64 ch1_data;
54
55 ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
56 ch0_data = ctx->csa.spu_chnldata_RW[0];
57 ch1_data = ctx->csa.spu_chnldata_RW[1];
58 ctx->csa.spu_chnldata_RW[0] |= event;
59 if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
60 ctx->csa.spu_chnlcnt_RW[0] = 1;
61 }
62}
63
64static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data)
65{
66 u32 mbox_stat;
67 int ret = 0;
68
69 spin_lock(&ctx->csa.register_lock);
70 mbox_stat = ctx->csa.prob.mb_stat_R;
71 if (mbox_stat & 0x0000ff) {
72 /* Read the first available word.
73 * Implementation note: the depth
74 * of pu_mb_R is currently 1.
75 */
76 *data = ctx->csa.prob.pu_mb_R;
77 ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
78 ctx->csa.spu_chnlcnt_RW[28] = 1;
79 gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
80 ret = 4;
81 }
82 spin_unlock(&ctx->csa.register_lock);
83 return ret;
84}
85
86static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
87{
88 return ctx->csa.prob.mb_stat_R;
89}
90
91static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
92 unsigned int events)
93{
94 int ret;
95 u32 stat;
96
97 ret = 0;
98 spin_lock_irq(&ctx->csa.register_lock);
99 stat = ctx->csa.prob.mb_stat_R;
100
101 /* if the requested event is there, return the poll
102 mask, otherwise enable the interrupt to get notified,
103 but first mark any pending interrupts as done so
104 we don't get woken up unnecessarily */
105
106 if (events & (POLLIN | POLLRDNORM)) {
107 if (stat & 0xff0000)
108 ret |= POLLIN | POLLRDNORM;
109 else {
110 ctx->csa.priv1.int_stat_class2_RW &= ~0x1;
111 ctx->csa.priv1.int_mask_class2_RW |= 0x1;
112 }
113 }
114 if (events & (POLLOUT | POLLWRNORM)) {
115 if (stat & 0x00ff00)
116 ret |= POLLOUT | POLLWRNORM;
117 else {
118 ctx->csa.priv1.int_stat_class2_RW &= ~0x10;
119 ctx->csa.priv1.int_mask_class2_RW |= 0x10;
120 }
121 }
122 spin_unlock_irq(&ctx->csa.register_lock);
123 return ret;
124}
125
126static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
127{
128 int ret;
129
130 spin_lock(&ctx->csa.register_lock);
131 if (ctx->csa.prob.mb_stat_R & 0xff0000) {
132 /* Read the first available word.
133 * Implementation note: the depth
134 * of puint_mb_R is currently 1.
135 */
136 *data = ctx->csa.priv2.puint_mb_R;
137 ctx->csa.prob.mb_stat_R &= ~(0xff0000);
138 ctx->csa.spu_chnlcnt_RW[30] = 1;
139 gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
140 ret = 4;
141 } else {
142 /* make sure we get woken up by the interrupt */
143 ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
144 ret = 0;
145 }
146 spin_unlock(&ctx->csa.register_lock);
147 return ret;
148}
149
150static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
151{
152 int ret;
153
154 spin_lock(&ctx->csa.register_lock);
155 if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
156 int slot = ctx->csa.spu_chnlcnt_RW[29];
157 int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;
158
159 /* We have space to write wbox_data.
160 * Implementation note: the depth
161 * of spu_mb_W is currently 4.
162 */
163 BUG_ON(avail != (4 - slot));
164 ctx->csa.spu_mailbox_data[slot] = data;
165 ctx->csa.spu_chnlcnt_RW[29] = ++slot;
166 ctx->csa.prob.mb_stat_R = (ctx->csa.prob.mb_stat_R & ~0x00ff00) | (((4 - slot) & 0xff) << 8);
167 gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
168 ret = 4;
169 } else {
170 /* make sure we get woken up by the interrupt when space
171 becomes available */
172 ctx->csa.priv1.int_mask_class2_RW |= 0x10;
173 ret = 0;
174 }
175 spin_unlock(&ctx->csa.register_lock);
176 return ret;
177}
178
179static u32 spu_backing_signal1_read(struct spu_context *ctx)
180{
181 return ctx->csa.spu_chnldata_RW[3];
182}
183
184static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
185{
186 spin_lock(&ctx->csa.register_lock);
187 if (ctx->csa.priv2.spu_cfg_RW & 0x1)
188 ctx->csa.spu_chnldata_RW[3] |= data;
189 else
190 ctx->csa.spu_chnldata_RW[3] = data;
191 ctx->csa.spu_chnlcnt_RW[3] = 1;
192 gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
193 spin_unlock(&ctx->csa.register_lock);
194}
195
196static u32 spu_backing_signal2_read(struct spu_context *ctx)
197{
198 return ctx->csa.spu_chnldata_RW[4];
199}
200
201static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
202{
203 spin_lock(&ctx->csa.register_lock);
204 if (ctx->csa.priv2.spu_cfg_RW & 0x2)
205 ctx->csa.spu_chnldata_RW[4] |= data;
206 else
207 ctx->csa.spu_chnldata_RW[4] = data;
208 ctx->csa.spu_chnlcnt_RW[4] = 1;
209 gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
210 spin_unlock(&ctx->csa.register_lock);
211}
212
213static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
214{
215 u64 tmp;
216
217 spin_lock(&ctx->csa.register_lock);
218 tmp = ctx->csa.priv2.spu_cfg_RW;
219 if (val)
220 tmp |= 1;
221 else
222 tmp &= ~1;
223 ctx->csa.priv2.spu_cfg_RW = tmp;
224 spin_unlock(&ctx->csa.register_lock);
225}
226
227static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
228{
229 return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
230}
231
232static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
233{
234 u64 tmp;
235
236 spin_lock(&ctx->csa.register_lock);
237 tmp = ctx->csa.priv2.spu_cfg_RW;
238 if (val)
239 tmp |= 2;
240 else
241 tmp &= ~2;
242 ctx->csa.priv2.spu_cfg_RW = tmp;
243 spin_unlock(&ctx->csa.register_lock);
244}
245
246static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
247{
248 return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
249}
250
251static u32 spu_backing_npc_read(struct spu_context *ctx)
252{
253 return ctx->csa.prob.spu_npc_RW;
254}
255
256static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
257{
258 ctx->csa.prob.spu_npc_RW = val;
259}
260
261static u32 spu_backing_status_read(struct spu_context *ctx)
262{
263 return ctx->csa.prob.spu_status_R;
264}
265
266static char *spu_backing_get_ls(struct spu_context *ctx)
267{
268 return ctx->csa.lscsa->ls;
269}
270
271static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
272{
273 spin_lock(&ctx->csa.register_lock);
274 ctx->csa.prob.spu_runcntl_RW = val;
275 if (val & SPU_RUNCNTL_RUNNABLE) {
276 ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
277 } else {
278 ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
279 }
280 spin_unlock(&ctx->csa.register_lock);
281}
282
283static void spu_backing_runcntl_stop(struct spu_context *ctx)
284{
285 spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
286}
287
288struct spu_context_ops spu_backing_ops = {
289 .mbox_read = spu_backing_mbox_read,
290 .mbox_stat_read = spu_backing_mbox_stat_read,
291 .mbox_stat_poll = spu_backing_mbox_stat_poll,
292 .ibox_read = spu_backing_ibox_read,
293 .wbox_write = spu_backing_wbox_write,
294 .signal1_read = spu_backing_signal1_read,
295 .signal1_write = spu_backing_signal1_write,
296 .signal2_read = spu_backing_signal2_read,
297 .signal2_write = spu_backing_signal2_write,
298 .signal1_type_set = spu_backing_signal1_type_set,
299 .signal1_type_get = spu_backing_signal1_type_get,
300 .signal2_type_set = spu_backing_signal2_type_set,
301 .signal2_type_get = spu_backing_signal2_type_get,
302 .npc_read = spu_backing_npc_read,
303 .npc_write = spu_backing_npc_write,
304 .status_read = spu_backing_status_read,
305 .get_ls = spu_backing_get_ls,
306 .runcntl_write = spu_backing_runcntl_write,
307 .runcntl_stop = spu_backing_runcntl_stop,
308};
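
A note on the magic constants: this file and hw_ops.c decode the same mailbox
status word, mb_stat_R, which packs three byte-wide counters. A sketch of the
layout, with helper names of our own invention (the kernel code open-codes the
masks):

    /* mb_stat_R layout, as used by backing_ops.c and hw_ops.c:
     *   bits  7..0   pu_mb_R entries available to read     (mask 0x0000ff)
     *   bits 15..8   spu_mb_W slots free for writing       (mask 0x00ff00)
     *   bits 23..16  puint_mb_R entries available to read  (mask 0xff0000)
     */
    static inline unsigned int mbox_count(u32 stat) { return stat & 0xff; }
    static inline unsigned int wbox_free(u32 stat)  { return (stat >> 8) & 0xff; }
    static inline unsigned int ibox_count(u32 stat) { return (stat >> 16) & 0xff; }
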
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
new file mode 100644
index 00000000000..336f238102f
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -0,0 +1,167 @@
1/*
2 * SPU file system -- SPU context management
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/slab.h>
26#include <asm/spu.h>
27#include <asm/spu_csa.h>
28#include "spufs.h"
29
30struct spu_context *alloc_spu_context(struct address_space *local_store)
31{
32 struct spu_context *ctx;
33 ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
34 if (!ctx)
35 goto out;
36 /* Binding to physical processor deferred
37 * until spu_activate().
38 */
39 spu_init_csa(&ctx->csa);
40 if (!ctx->csa.lscsa) {
41 goto out_free;
42 }
43 spin_lock_init(&ctx->mmio_lock);
44 kref_init(&ctx->kref);
45 init_rwsem(&ctx->state_sema);
46 init_MUTEX(&ctx->run_sema);
47 init_waitqueue_head(&ctx->ibox_wq);
48 init_waitqueue_head(&ctx->wbox_wq);
49 init_waitqueue_head(&ctx->stop_wq);
50 ctx->ibox_fasync = NULL;
51 ctx->wbox_fasync = NULL;
52 ctx->state = SPU_STATE_SAVED;
53 ctx->local_store = local_store;
54 ctx->spu = NULL;
55 ctx->ops = &spu_backing_ops;
56 ctx->owner = get_task_mm(current);
57 goto out;
58out_free:
59 kfree(ctx);
60 ctx = NULL;
61out:
62 return ctx;
63}
64
65void destroy_spu_context(struct kref *kref)
66{
67 struct spu_context *ctx;
68 ctx = container_of(kref, struct spu_context, kref);
69 down_write(&ctx->state_sema);
70 spu_deactivate(ctx);
71 ctx->ibox_fasync = NULL;
72 ctx->wbox_fasync = NULL;
73 up_write(&ctx->state_sema);
74 spu_fini_csa(&ctx->csa);
75 kfree(ctx);
76}
77
78struct spu_context * get_spu_context(struct spu_context *ctx)
79{
80 kref_get(&ctx->kref);
81 return ctx;
82}
83
84int put_spu_context(struct spu_context *ctx)
85{
86 return kref_put(&ctx->kref, &destroy_spu_context);
87}
88
89/* give up the mm reference when the context is about to be destroyed */
90void spu_forget(struct spu_context *ctx)
91{
92 struct mm_struct *mm;
93 spu_acquire_saved(ctx);
94 mm = ctx->owner;
95 ctx->owner = NULL;
96 mmput(mm);
97 spu_release(ctx);
98}
99
100void spu_acquire(struct spu_context *ctx)
101{
102 down_read(&ctx->state_sema);
103}
104
105void spu_release(struct spu_context *ctx)
106{
107 up_read(&ctx->state_sema);
108}
109
110void spu_unmap_mappings(struct spu_context *ctx)
111{
112 unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
113}
114
115int spu_acquire_runnable(struct spu_context *ctx)
116{
117 int ret = 0;
118
119 down_read(&ctx->state_sema);
120 if (ctx->state == SPU_STATE_RUNNABLE) {
121 ctx->spu->prio = current->prio;
122 return 0;
123 }
124 up_read(&ctx->state_sema);
125
126 down_write(&ctx->state_sema);
127 /* ctx is about to be freed, can't acquire any more */
128 if (!ctx->owner) {
129 ret = -EINVAL;
130 goto out;
131 }
132
133 if (ctx->state == SPU_STATE_SAVED) {
134 ret = spu_activate(ctx, 0);
135 if (ret)
136 goto out;
137 ctx->state = SPU_STATE_RUNNABLE;
138 }
139
140 downgrade_write(&ctx->state_sema);
141 /* On success, we return holding the lock */
142
143 return ret;
144out:
145 /* Release here, to simplify calling code. */
146 up_write(&ctx->state_sema);
147
148 return ret;
149}
150
151void spu_acquire_saved(struct spu_context *ctx)
152{
153 down_read(&ctx->state_sema);
154
155 if (ctx->state == SPU_STATE_SAVED)
156 return;
157
158 up_read(&ctx->state_sema);
159 down_write(&ctx->state_sema);
160
161 if (ctx->state == SPU_STATE_RUNNABLE) {
162 spu_deactivate(ctx);
163 ctx->state = SPU_STATE_SAVED;
164 }
165
166 downgrade_write(&ctx->state_sema);
167}
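
Taken together these helpers define the locking contract of spufs: state_sema
is held for reading while a context is used through ctx->ops, and for writing
only across a state transition. A condensed sketch of the pattern that callers
in run.c and file.c follow (function names here are ours; error handling
abbreviated):

    static int start_context(struct spu_context *ctx, u32 npc)
    {
            int ret;

            ret = spu_acquire_runnable(ctx);  /* returns 0 holding the read lock */
            if (ret)
                    return ret;
            ctx->ops->npc_write(ctx, npc);
            ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
            spu_release(ctx);
            return 0;
    }

    static u32 read_saved_decr(struct spu_context *ctx)
    {
            u32 decr;

            spu_acquire_saved(ctx);           /* forces a save if it was running */
            decr = ctx->csa.lscsa->decr.slot[0];
            spu_release(ctx);
            return decr;
    }
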
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
new file mode 100644
index 00000000000..dfa649c9b95
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -0,0 +1,794 @@
1/*
2 * SPU file system -- file contents
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/fs.h>
24#include <linux/ioctl.h>
25#include <linux/module.h>
26#include <linux/pagemap.h>
27#include <linux/poll.h>
28#include <linux/ptrace.h>
29
30#include <asm/io.h>
31#include <asm/semaphore.h>
32#include <asm/spu.h>
33#include <asm/uaccess.h>
34
35#include "spufs.h"
36
37
38static int
39spufs_mem_open(struct inode *inode, struct file *file)
40{
41 struct spufs_inode_info *i = SPUFS_I(inode);
42 file->private_data = i->i_ctx;
43 file->f_mapping = i->i_ctx->local_store;
44 return 0;
45}
46
47static ssize_t
48spufs_mem_read(struct file *file, char __user *buffer,
49 size_t size, loff_t *pos)
50{
51 struct spu_context *ctx = file->private_data;
52 char *local_store;
53 int ret;
54
55 spu_acquire(ctx);
56
57 local_store = ctx->ops->get_ls(ctx);
58 ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);
59
60 spu_release(ctx);
61 return ret;
62}
63
64static ssize_t
65spufs_mem_write(struct file *file, const char __user *buffer,
66 size_t size, loff_t *pos)
67{
68 struct spu_context *ctx = file->private_data;
69 char *local_store;
70 int ret;
71
72 size = min_t(ssize_t, LS_SIZE - *pos, size);
73 if (size <= 0)
74 return -EFBIG;
75 *pos += size;
76
77 spu_acquire(ctx);
78
79 local_store = ctx->ops->get_ls(ctx);
80 ret = copy_from_user(local_store + *pos - size,
81 buffer, size) ? -EFAULT : size;
82
83 spu_release(ctx);
84 return ret;
85}
86
87#ifdef CONFIG_SPARSEMEM
88static struct page *
89spufs_mem_mmap_nopage(struct vm_area_struct *vma,
90 unsigned long address, int *type)
91{
92 struct page *page = NOPAGE_SIGBUS;
93
94 struct spu_context *ctx = vma->vm_file->private_data;
95 unsigned long offset = address - vma->vm_start;
96 offset += vma->vm_pgoff << PAGE_SHIFT;
97
98 spu_acquire(ctx);
99
100 if (ctx->state == SPU_STATE_SAVED)
101 page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
102 else
103 page = pfn_to_page((ctx->spu->local_store_phys + offset)
104 >> PAGE_SHIFT);
105
106 spu_release(ctx);
107
108 if (type)
109 *type = VM_FAULT_MINOR;
110
111 page_cache_get(page);
112 return page;
113}
114
115static struct vm_operations_struct spufs_mem_mmap_vmops = {
116 .nopage = spufs_mem_mmap_nopage,
117};
118
119static int
120spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
121{
122 if (!(vma->vm_flags & VM_SHARED))
123 return -EINVAL;
124
125 /* FIXME: */
126 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
127 | _PAGE_NO_CACHE);
128
129 vma->vm_ops = &spufs_mem_mmap_vmops;
130 return 0;
131}
132#endif
133
134static struct file_operations spufs_mem_fops = {
135 .open = spufs_mem_open,
136 .read = spufs_mem_read,
137 .write = spufs_mem_write,
138 .llseek = generic_file_llseek,
139#ifdef CONFIG_SPARSEMEM
140 .mmap = spufs_mem_mmap,
141#endif
142};
143
144static int
145spufs_regs_open(struct inode *inode, struct file *file)
146{
147 struct spufs_inode_info *i = SPUFS_I(inode);
148 file->private_data = i->i_ctx;
149 return 0;
150}
151
152static ssize_t
153spufs_regs_read(struct file *file, char __user *buffer,
154 size_t size, loff_t *pos)
155{
156 struct spu_context *ctx = file->private_data;
157 struct spu_lscsa *lscsa = ctx->csa.lscsa;
158 int ret;
159
160 spu_acquire_saved(ctx);
161
162 ret = simple_read_from_buffer(buffer, size, pos,
163 lscsa->gprs, sizeof lscsa->gprs);
164
165 spu_release(ctx);
166 return ret;
167}
168
169static ssize_t
170spufs_regs_write(struct file *file, const char __user *buffer,
171 size_t size, loff_t *pos)
172{
173 struct spu_context *ctx = file->private_data;
174 struct spu_lscsa *lscsa = ctx->csa.lscsa;
175 int ret;
176
177 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
178 if (size <= 0)
179 return -EFBIG;
180 *pos += size;
181
182 spu_acquire_saved(ctx);
183
184 ret = copy_from_user(lscsa->gprs + *pos - size,
185 buffer, size) ? -EFAULT : size;
186
187 spu_release(ctx);
188 return ret;
189}
190
191static struct file_operations spufs_regs_fops = {
192 .open = spufs_regs_open,
193 .read = spufs_regs_read,
194 .write = spufs_regs_write,
195 .llseek = generic_file_llseek,
196};
197
198static ssize_t
199spufs_fpcr_read(struct file *file, char __user * buffer,
200 size_t size, loff_t * pos)
201{
202 struct spu_context *ctx = file->private_data;
203 struct spu_lscsa *lscsa = ctx->csa.lscsa;
204 int ret;
205
206 spu_acquire_saved(ctx);
207
208 ret = simple_read_from_buffer(buffer, size, pos,
209 &lscsa->fpcr, sizeof(lscsa->fpcr));
210
211 spu_release(ctx);
212 return ret;
213}
214
215static ssize_t
216spufs_fpcr_write(struct file *file, const char __user * buffer,
217 size_t size, loff_t * pos)
218{
219 struct spu_context *ctx = file->private_data;
220 struct spu_lscsa *lscsa = ctx->csa.lscsa;
221 int ret;
222
223 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
224 if (size <= 0)
225 return -EFBIG;
226 *pos += size;
227
228 spu_acquire_saved(ctx);
229
230 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
231 buffer, size) ? -EFAULT : size;
232
233 spu_release(ctx);
234 return ret;
235}
236
237static struct file_operations spufs_fpcr_fops = {
238 .open = spufs_regs_open,
239 .read = spufs_fpcr_read,
240 .write = spufs_fpcr_write,
241 .llseek = generic_file_llseek,
242};
243
244/* generic open function for all pipe-like files */
245static int spufs_pipe_open(struct inode *inode, struct file *file)
246{
247 struct spufs_inode_info *i = SPUFS_I(inode);
248 file->private_data = i->i_ctx;
249
250 return nonseekable_open(inode, file);
251}
252
253static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
254 size_t len, loff_t *pos)
255{
256 struct spu_context *ctx = file->private_data;
257 u32 mbox_data;
258 int ret;
259
260 if (len < 4)
261 return -EINVAL;
262
263 spu_acquire(ctx);
264 ret = ctx->ops->mbox_read(ctx, &mbox_data);
265 spu_release(ctx);
266
267 if (!ret)
268 return -EAGAIN;
269
270 if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
271 return -EFAULT;
272
273 return 4;
274}
275
276static struct file_operations spufs_mbox_fops = {
277 .open = spufs_pipe_open,
278 .read = spufs_mbox_read,
279};
280
281static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
282 size_t len, loff_t *pos)
283{
284 struct spu_context *ctx = file->private_data;
285 u32 mbox_stat;
286
287 if (len < 4)
288 return -EINVAL;
289
290 spu_acquire(ctx);
291
292 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
293
294 spu_release(ctx);
295
296 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
297 return -EFAULT;
298
299 return 4;
300}
301
302static struct file_operations spufs_mbox_stat_fops = {
303 .open = spufs_pipe_open,
304 .read = spufs_mbox_stat_read,
305};
306
307/* low-level ibox access function */
308size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
309{
310 return ctx->ops->ibox_read(ctx, data);
311}
312
313static int spufs_ibox_fasync(int fd, struct file *file, int on)
314{
315 struct spu_context *ctx = file->private_data;
316
317 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
318}
319
320/* interrupt-level ibox callback function. */
321void spufs_ibox_callback(struct spu *spu)
322{
323 struct spu_context *ctx = spu->ctx;
324
325 wake_up_all(&ctx->ibox_wq);
326 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
327}
328
329static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
330 size_t len, loff_t *pos)
331{
332 struct spu_context *ctx = file->private_data;
333 u32 ibox_data;
334 ssize_t ret;
335
336 if (len < 4)
337 return -EINVAL;
338
339 spu_acquire(ctx);
340
341 ret = 0;
342 if (file->f_flags & O_NONBLOCK) {
343 if (!spu_ibox_read(ctx, &ibox_data))
344 ret = -EAGAIN;
345 } else {
346 ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
347 }
348
349 spu_release(ctx);
350
351 if (ret)
352 return ret;
353
354 ret = 4;
355 if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
356 ret = -EFAULT;
357
358 return ret;
359}
360
361static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
362{
363 struct spu_context *ctx = file->private_data;
364 unsigned int mask;
365
366 poll_wait(file, &ctx->ibox_wq, wait);
367
368 spu_acquire(ctx);
369 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
370 spu_release(ctx);
371
372 return mask;
373}
374
375static struct file_operations spufs_ibox_fops = {
376 .open = spufs_pipe_open,
377 .read = spufs_ibox_read,
378 .poll = spufs_ibox_poll,
379 .fasync = spufs_ibox_fasync,
380};
381
382static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
383 size_t len, loff_t *pos)
384{
385 struct spu_context *ctx = file->private_data;
386 u32 ibox_stat;
387
388 if (len < 4)
389 return -EINVAL;
390
391 spu_acquire(ctx);
392 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
393 spu_release(ctx);
394
395 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
396 return -EFAULT;
397
398 return 4;
399}
400
401static struct file_operations spufs_ibox_stat_fops = {
402 .open = spufs_pipe_open,
403 .read = spufs_ibox_stat_read,
404};
405
406/* low-level mailbox write */
407size_t spu_wbox_write(struct spu_context *ctx, u32 data)
408{
409 return ctx->ops->wbox_write(ctx, data);
410}
411
412static int spufs_wbox_fasync(int fd, struct file *file, int on)
413{
414 struct spu_context *ctx = file->private_data;
415 int ret;
416
417 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
418
419 return ret;
420}
421
422/* interrupt-level wbox callback function. */
423void spufs_wbox_callback(struct spu *spu)
424{
425 struct spu_context *ctx = spu->ctx;
426
427 wake_up_all(&ctx->wbox_wq);
428 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
429}
430
431static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
432 size_t len, loff_t *pos)
433{
434 struct spu_context *ctx = file->private_data;
435 u32 wbox_data;
436 int ret;
437
438 if (len < 4)
439 return -EINVAL;
440
441 if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
442 return -EFAULT;
443
444 spu_acquire(ctx);
445
446 ret = 0;
447 if (file->f_flags & O_NONBLOCK) {
448 if (!spu_wbox_write(ctx, wbox_data))
449 ret = -EAGAIN;
450 } else {
451 ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
452 }
453
454 spu_release(ctx);
455
456 return ret ? ret : sizeof wbox_data;
457}
458
459static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
460{
461 struct spu_context *ctx = file->private_data;
462 unsigned int mask;
463
464 poll_wait(file, &ctx->wbox_wq, wait);
465
466 spu_acquire(ctx);
467 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
468 spu_release(ctx);
469
470 return mask;
471}
472
473static struct file_operations spufs_wbox_fops = {
474 .open = spufs_pipe_open,
475 .write = spufs_wbox_write,
476 .poll = spufs_wbox_poll,
477 .fasync = spufs_wbox_fasync,
478};
479
480static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
481 size_t len, loff_t *pos)
482{
483 struct spu_context *ctx = file->private_data;
484 u32 wbox_stat;
485
486 if (len < 4)
487 return -EINVAL;
488
489 spu_acquire(ctx);
490 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
491 spu_release(ctx);
492
493 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
494 return -EFAULT;
495
496 return 4;
497}
498
499static struct file_operations spufs_wbox_stat_fops = {
500 .open = spufs_pipe_open,
501 .read = spufs_wbox_stat_read,
502};
503
504static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
505 size_t len, loff_t *pos)
506{
507 struct spu_context *ctx = file->private_data;
508 u32 data;
509
510 if (len < 4)
511 return -EINVAL;
512
513 spu_acquire(ctx);
514 data = ctx->ops->signal1_read(ctx);
515 spu_release(ctx);
516
517 if (copy_to_user(buf, &data, 4))
518 return -EFAULT;
519
520 return 4;
521}
522
523static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
524 size_t len, loff_t *pos)
525{
526 struct spu_context *ctx;
527 u32 data;
528
529 ctx = file->private_data;
530
531 if (len < 4)
532 return -EINVAL;
533
534 if (copy_from_user(&data, buf, 4))
535 return -EFAULT;
536
537 spu_acquire(ctx);
538 ctx->ops->signal1_write(ctx, data);
539 spu_release(ctx);
540
541 return 4;
542}
543
544static struct file_operations spufs_signal1_fops = {
545 .open = spufs_pipe_open,
546 .read = spufs_signal1_read,
547 .write = spufs_signal1_write,
548};
549
550static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
551 size_t len, loff_t *pos)
552{
553 struct spu_context *ctx;
554 u32 data;
555
556 ctx = file->private_data;
557
558 if (len < 4)
559 return -EINVAL;
560
561 spu_acquire(ctx);
562 data = ctx->ops->signal2_read(ctx);
563 spu_release(ctx);
564
565 if (copy_to_user(buf, &data, 4))
566 return -EFAULT;
567
568 return 4;
569}
570
571static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
572 size_t len, loff_t *pos)
573{
574 struct spu_context *ctx;
575 u32 data;
576
577 ctx = file->private_data;
578
579 if (len < 4)
580 return -EINVAL;
581
582 if (copy_from_user(&data, buf, 4))
583 return -EFAULT;
584
585 spu_acquire(ctx);
586 ctx->ops->signal2_write(ctx, data);
587 spu_release(ctx);
588
589 return 4;
590}
591
592static struct file_operations spufs_signal2_fops = {
593 .open = spufs_pipe_open,
594 .read = spufs_signal2_read,
595 .write = spufs_signal2_write,
596};
597
598static void spufs_signal1_type_set(void *data, u64 val)
599{
600 struct spu_context *ctx = data;
601
602 spu_acquire(ctx);
603 ctx->ops->signal1_type_set(ctx, val);
604 spu_release(ctx);
605}
606
607static u64 spufs_signal1_type_get(void *data)
608{
609 struct spu_context *ctx = data;
610 u64 ret;
611
612 spu_acquire(ctx);
613 ret = ctx->ops->signal1_type_get(ctx);
614 spu_release(ctx);
615
616 return ret;
617}
618DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
619 spufs_signal1_type_set, "%llu");
620
621static void spufs_signal2_type_set(void *data, u64 val)
622{
623 struct spu_context *ctx = data;
624
625 spu_acquire(ctx);
626 ctx->ops->signal2_type_set(ctx, val);
627 spu_release(ctx);
628}
629
630static u64 spufs_signal2_type_get(void *data)
631{
632 struct spu_context *ctx = data;
633 u64 ret;
634
635 spu_acquire(ctx);
636 ret = ctx->ops->signal2_type_get(ctx);
637 spu_release(ctx);
638
639 return ret;
640}
641DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
642 spufs_signal2_type_set, "%llu");
643
644static void spufs_npc_set(void *data, u64 val)
645{
646 struct spu_context *ctx = data;
647 spu_acquire(ctx);
648 ctx->ops->npc_write(ctx, val);
649 spu_release(ctx);
650}
651
652static u64 spufs_npc_get(void *data)
653{
654 struct spu_context *ctx = data;
655 u64 ret;
656 spu_acquire(ctx);
657 ret = ctx->ops->npc_read(ctx);
658 spu_release(ctx);
659 return ret;
660}
661DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
662
663static void spufs_decr_set(void *data, u64 val)
664{
665 struct spu_context *ctx = data;
666 struct spu_lscsa *lscsa = ctx->csa.lscsa;
667 spu_acquire_saved(ctx);
668 lscsa->decr.slot[0] = (u32) val;
669 spu_release(ctx);
670}
671
672static u64 spufs_decr_get(void *data)
673{
674 struct spu_context *ctx = data;
675 struct spu_lscsa *lscsa = ctx->csa.lscsa;
676 u64 ret;
677 spu_acquire_saved(ctx);
678 ret = lscsa->decr.slot[0];
679 spu_release(ctx);
680 return ret;
681}
682DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
683 "%llx\n")
684
685static void spufs_decr_status_set(void *data, u64 val)
686{
687 struct spu_context *ctx = data;
688 struct spu_lscsa *lscsa = ctx->csa.lscsa;
689 spu_acquire_saved(ctx);
690 lscsa->decr_status.slot[0] = (u32) val;
691 spu_release(ctx);
692}
693
694static u64 spufs_decr_status_get(void *data)
695{
696 struct spu_context *ctx = data;
697 struct spu_lscsa *lscsa = ctx->csa.lscsa;
698 u64 ret;
699 spu_acquire_saved(ctx);
700 ret = lscsa->decr_status.slot[0];
701 spu_release(ctx);
702 return ret;
703}
704DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
705 spufs_decr_status_set, "%llx\n")
706
707static void spufs_spu_tag_mask_set(void *data, u64 val)
708{
709 struct spu_context *ctx = data;
710 struct spu_lscsa *lscsa = ctx->csa.lscsa;
711 spu_acquire_saved(ctx);
712 lscsa->tag_mask.slot[0] = (u32) val;
713 spu_release(ctx);
714}
715
716static u64 spufs_spu_tag_mask_get(void *data)
717{
718 struct spu_context *ctx = data;
719 struct spu_lscsa *lscsa = ctx->csa.lscsa;
720 u64 ret;
721 spu_acquire_saved(ctx);
722 ret = lscsa->tag_mask.slot[0];
723 spu_release(ctx);
724 return ret;
725}
726DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
727 spufs_spu_tag_mask_set, "%llx\n")
728
729static void spufs_event_mask_set(void *data, u64 val)
730{
731 struct spu_context *ctx = data;
732 struct spu_lscsa *lscsa = ctx->csa.lscsa;
733 spu_acquire_saved(ctx);
734 lscsa->event_mask.slot[0] = (u32) val;
735 spu_release(ctx);
736}
737
738static u64 spufs_event_mask_get(void *data)
739{
740 struct spu_context *ctx = data;
741 struct spu_lscsa *lscsa = ctx->csa.lscsa;
742 u64 ret;
743 spu_acquire_saved(ctx);
744 ret = lscsa->event_mask.slot[0];
745 spu_release(ctx);
746 return ret;
747}
748DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
749 spufs_event_mask_set, "%llx\n")
750
751static void spufs_srr0_set(void *data, u64 val)
752{
753 struct spu_context *ctx = data;
754 struct spu_lscsa *lscsa = ctx->csa.lscsa;
755 spu_acquire_saved(ctx);
756 lscsa->srr0.slot[0] = (u32) val;
757 spu_release(ctx);
758}
759
760static u64 spufs_srr0_get(void *data)
761{
762 struct spu_context *ctx = data;
763 struct spu_lscsa *lscsa = ctx->csa.lscsa;
764 u64 ret;
765 spu_acquire_saved(ctx);
766 ret = lscsa->srr0.slot[0];
767 spu_release(ctx);
768 return ret;
769}
770DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
771 "%llx\n")
772
773struct tree_descr spufs_dir_contents[] = {
774 { "mem", &spufs_mem_fops, 0666, },
775 { "regs", &spufs_regs_fops, 0666, },
776 { "mbox", &spufs_mbox_fops, 0444, },
777 { "ibox", &spufs_ibox_fops, 0444, },
778 { "wbox", &spufs_wbox_fops, 0222, },
779 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
780 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
781 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
782 { "signal1", &spufs_signal1_fops, 0666, },
783 { "signal2", &spufs_signal2_fops, 0666, },
784 { "signal1_type", &spufs_signal1_type, 0666, },
785 { "signal2_type", &spufs_signal2_type, 0666, },
786 { "npc", &spufs_npc_ops, 0666, },
787 { "fpcr", &spufs_fpcr_fops, 0666, },
788 { "decr", &spufs_decr_ops, 0666, },
789 { "decr_status", &spufs_decr_status_ops, 0666, },
790 { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
791 { "event_mask", &spufs_event_mask_ops, 0666, },
792 { "srr0", &spufs_srr0_ops, 0666, },
793 {},
794};
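
At the user level these files behave like ordinary pipes: every mailbox
transfer is exactly four bytes, blocking reads sleep on the wait queues above,
and O_NONBLOCK turns a would-block access into -EAGAIN. A minimal sketch,
assuming spufs is mounted at /spu and a context directory my_ctx already
exists (both names are illustrative):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned int data = 0x1234;
            int fd;

            /* interrupting mailbox: SPU -> PU, read-only, 4 bytes at a time */
            fd = open("/spu/my_ctx/ibox", O_RDONLY | O_NONBLOCK);
            if (fd < 0)
                    return 1;
            if (read(fd, &data, 4) == 4)
                    printf("ibox: 0x%08x\n", data);
            else if (errno == EAGAIN)
                    printf("ibox empty\n");
            close(fd);

            /* inbound mailbox: PU -> SPU, blocks until a slot is free */
            fd = open("/spu/my_ctx/wbox", O_WRONLY);
            if (fd < 0)
                    return 1;
            write(fd, &data, 4);
            close(fd);
            return 0;
    }
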
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
new file mode 100644
index 00000000000..5445719bff7
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -0,0 +1,255 @@
1/* hw_ops.c - query/set operations on active SPU context.
2 *
3 * Copyright (C) IBM 2005
4 * Author: Mark Nutter <mnutter@us.ibm.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/kernel.h>
26#include <linux/mm.h>
27#include <linux/poll.h>
28#include <linux/smp.h>
29#include <linux/smp_lock.h>
30#include <linux/stddef.h>
31#include <linux/unistd.h>
32
33#include <asm/io.h>
34#include <asm/spu.h>
35#include <asm/spu_csa.h>
36#include <asm/mmu_context.h>
37#include "spufs.h"
38
39static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data)
40{
41 struct spu *spu = ctx->spu;
42 struct spu_problem __iomem *prob = spu->problem;
43 u32 mbox_stat;
44 int ret = 0;
45
46 spin_lock_irq(&spu->register_lock);
47 mbox_stat = in_be32(&prob->mb_stat_R);
48 if (mbox_stat & 0x0000ff) {
49 *data = in_be32(&prob->pu_mb_R);
50 ret = 4;
51 }
52 spin_unlock_irq(&spu->register_lock);
53 return ret;
54}
55
56static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
57{
58 return in_be32(&ctx->spu->problem->mb_stat_R);
59}
60
61static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
62 unsigned int events)
63{
64 struct spu *spu = ctx->spu;
65 int ret = 0;
66 u32 stat;
67
68 spin_lock_irq(&spu->register_lock);
69 stat = in_be32(&spu->problem->mb_stat_R);
70
71 /* if the requested event is there, return the poll
72 mask, otherwise enable the interrupt to get notified,
73 but first mark any pending interrupts as done so
74 we don't get woken up unnecessarily */
75
76 if (events & (POLLIN | POLLRDNORM)) {
77 if (stat & 0xff0000)
78 ret |= POLLIN | POLLRDNORM;
79 else {
80 spu_int_stat_clear(spu, 2, 0x1);
81 spu_int_mask_or(spu, 2, 0x1);
82 }
83 }
84 if (events & (POLLOUT | POLLWRNORM)) {
85 if (stat & 0x00ff00)
86 ret |= POLLOUT | POLLWRNORM;
87 else {
88 spu_int_stat_clear(spu, 2, 0x10);
89 spu_int_mask_or(spu, 2, 0x10);
90 }
91 }
92 spin_unlock_irq(&spu->register_lock);
93 return ret;
94}
95
96static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
97{
98 struct spu *spu = ctx->spu;
99 struct spu_problem __iomem *prob = spu->problem;
100 struct spu_priv2 __iomem *priv2 = spu->priv2;
101 int ret;
102
103 spin_lock_irq(&spu->register_lock);
104 if (in_be32(&prob->mb_stat_R) & 0xff0000) {
105 /* read the first available word */
106 *data = in_be64(&priv2->puint_mb_R);
107 ret = 4;
108 } else {
109 /* make sure we get woken up by the interrupt */
110 spu_int_mask_or(spu, 2, 0x1);
111 ret = 0;
112 }
113 spin_unlock_irq(&spu->register_lock);
114 return ret;
115}
116
117static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
118{
119 struct spu *spu = ctx->spu;
120 struct spu_problem __iomem *prob = spu->problem;
121 int ret;
122
123 spin_lock_irq(&spu->register_lock);
124 if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
125 /* we have space to write wbox_data to */
126 out_be32(&prob->spu_mb_W, data);
127 ret = 4;
128 } else {
129 /* make sure we get woken up by the interrupt when space
130 becomes available */
131 spu_int_mask_or(spu, 2, 0x10);
132 ret = 0;
133 }
134 spin_unlock_irq(&spu->register_lock);
135 return ret;
136}
137
138static u32 spu_hw_signal1_read(struct spu_context *ctx)
139{
140 return in_be32(&ctx->spu->problem->signal_notify1);
141}
142
143static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
144{
145 out_be32(&ctx->spu->problem->signal_notify1, data);
146}
147
148static u32 spu_hw_signal2_read(struct spu_context *ctx)
149{
150 return in_be32(&ctx->spu->problem->signal_notify2);
151}
152
153static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
154{
155 out_be32(&ctx->spu->problem->signal_notify2, data);
156}
157
158static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
159{
160 struct spu *spu = ctx->spu;
161 struct spu_priv2 __iomem *priv2 = spu->priv2;
162 u64 tmp;
163
164 spin_lock_irq(&spu->register_lock);
165 tmp = in_be64(&priv2->spu_cfg_RW);
166 if (val)
167 tmp |= 1;
168 else
169 tmp &= ~1;
170 out_be64(&priv2->spu_cfg_RW, tmp);
171 spin_unlock_irq(&spu->register_lock);
172}
173
174static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
175{
176 return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
177}
178
179static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
180{
181 struct spu *spu = ctx->spu;
182 struct spu_priv2 __iomem *priv2 = spu->priv2;
183 u64 tmp;
184
185 spin_lock_irq(&spu->register_lock);
186 tmp = in_be64(&priv2->spu_cfg_RW);
187 if (val)
188 tmp |= 2;
189 else
190 tmp &= ~2;
191 out_be64(&priv2->spu_cfg_RW, tmp);
192 spin_unlock_irq(&spu->register_lock);
193}
194
195static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
196{
197 return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
198}
199
200static u32 spu_hw_npc_read(struct spu_context *ctx)
201{
202 return in_be32(&ctx->spu->problem->spu_npc_RW);
203}
204
205static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
206{
207 out_be32(&ctx->spu->problem->spu_npc_RW, val);
208}
209
210static u32 spu_hw_status_read(struct spu_context *ctx)
211{
212 return in_be32(&ctx->spu->problem->spu_status_R);
213}
214
215static char *spu_hw_get_ls(struct spu_context *ctx)
216{
217 return ctx->spu->local_store;
218}
219
220static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
221{
222 eieio();
223 out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
224}
225
226static void spu_hw_runcntl_stop(struct spu_context *ctx)
227{
228 spin_lock_irq(&ctx->spu->register_lock);
229 out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
230 while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
231 cpu_relax();
232 spin_unlock_irq(&ctx->spu->register_lock);
233}
234
235struct spu_context_ops spu_hw_ops = {
236 .mbox_read = spu_hw_mbox_read,
237 .mbox_stat_read = spu_hw_mbox_stat_read,
238 .mbox_stat_poll = spu_hw_mbox_stat_poll,
239 .ibox_read = spu_hw_ibox_read,
240 .wbox_write = spu_hw_wbox_write,
241 .signal1_read = spu_hw_signal1_read,
242 .signal1_write = spu_hw_signal1_write,
243 .signal2_read = spu_hw_signal2_read,
244 .signal2_write = spu_hw_signal2_write,
245 .signal1_type_set = spu_hw_signal1_type_set,
246 .signal1_type_get = spu_hw_signal1_type_get,
247 .signal2_type_set = spu_hw_signal2_type_set,
248 .signal2_type_get = spu_hw_signal2_type_get,
249 .npc_read = spu_hw_npc_read,
250 .npc_write = spu_hw_npc_write,
251 .status_read = spu_hw_status_read,
252 .get_ls = spu_hw_get_ls,
253 .runcntl_write = spu_hw_runcntl_write,
254 .runcntl_stop = spu_hw_runcntl_stop,
255};
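
hw_ops.c completes the pair started in backing_ops.c: the same
spu_context_ops signature, but every access goes to the memory-mapped problem
state or priv2 area of a physical SPU. Nothing selects between the two tables
at the call sites; the scheduler is expected to repoint ctx->ops when binding
or unbinding a context, roughly like this (a sketch of the idea, not the
literal sched.c code):

    /* binding: the context now runs on real hardware */
    static void bind_sketch(struct spu_context *ctx, struct spu *spu)
    {
            ctx->spu = spu;
            ctx->ops = &spu_hw_ops;          /* reads/writes now hit mmio */
    }

    /* unbinding: state has been saved back into ctx->csa */
    static void unbind_sketch(struct spu_context *ctx)
    {
            ctx->ops = &spu_backing_ops;     /* back to the saved image */
            ctx->spu = NULL;
    }
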
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
new file mode 100644
index 00000000000..1f3507c75e9
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -0,0 +1,486 @@
1/*
2 * SPU file system
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/file.h>
24#include <linux/fs.h>
25#include <linux/backing-dev.h>
26#include <linux/init.h>
27#include <linux/ioctl.h>
28#include <linux/module.h>
29#include <linux/mount.h>
30#include <linux/namei.h>
31#include <linux/pagemap.h>
32#include <linux/poll.h>
33#include <linux/slab.h>
34#include <linux/parser.h>
35
36#include <asm/io.h>
37#include <asm/semaphore.h>
38#include <asm/spu.h>
39#include <asm/uaccess.h>
40
41#include "spufs.h"
42
43static kmem_cache_t *spufs_inode_cache;
44
45static struct inode *
46spufs_alloc_inode(struct super_block *sb)
47{
48 struct spufs_inode_info *ei;
49
50 ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
51 if (!ei)
52 return NULL;
53 return &ei->vfs_inode;
54}
55
56static void
57spufs_destroy_inode(struct inode *inode)
58{
59 kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
60}
61
62static void
63spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
64{
65 struct spufs_inode_info *ei = p;
66
67 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
68 SLAB_CTOR_CONSTRUCTOR) {
69 inode_init_once(&ei->vfs_inode);
70 }
71}
72
73static struct inode *
74spufs_new_inode(struct super_block *sb, int mode)
75{
76 struct inode *inode;
77
78 inode = new_inode(sb);
79 if (!inode)
80 goto out;
81
82 inode->i_mode = mode;
83 inode->i_uid = current->fsuid;
84 inode->i_gid = current->fsgid;
85 inode->i_blksize = PAGE_CACHE_SIZE;
86 inode->i_blocks = 0;
87 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
88out:
89 return inode;
90}
91
92static int
93spufs_setattr(struct dentry *dentry, struct iattr *attr)
94{
95 struct inode *inode = dentry->d_inode;
96
97 if ((attr->ia_valid & ATTR_SIZE) &&
98 (attr->ia_size != inode->i_size))
99 return -EINVAL;
100 return inode_setattr(inode, attr);
101}
102
103
104static int
105spufs_new_file(struct super_block *sb, struct dentry *dentry,
106 struct file_operations *fops, int mode,
107 struct spu_context *ctx)
108{
109 static struct inode_operations spufs_file_iops = {
110 .setattr = spufs_setattr,
111 };
112 struct inode *inode;
113 int ret;
114
115 ret = -ENOSPC;
116 inode = spufs_new_inode(sb, S_IFREG | mode);
117 if (!inode)
118 goto out;
119
120 ret = 0;
121 inode->i_op = &spufs_file_iops;
122 inode->i_fop = fops;
123 inode->u.generic_ip = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
124 d_add(dentry, inode);
125out:
126 return ret;
127}
128
129static void
130spufs_delete_inode(struct inode *inode)
131{
132 if (SPUFS_I(inode)->i_ctx)
133 put_spu_context(SPUFS_I(inode)->i_ctx);
134 clear_inode(inode);
135}
136
137static void spufs_prune_dir(struct dentry *dir)
138{
139 struct dentry *dentry, *tmp;
140 down(&dir->d_inode->i_sem);
141 list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
142 spin_lock(&dcache_lock);
143 spin_lock(&dentry->d_lock);
144 if (!(d_unhashed(dentry)) && dentry->d_inode) {
145 dget_locked(dentry);
146 __d_drop(dentry);
147 spin_unlock(&dentry->d_lock);
148 simple_unlink(dir->d_inode, dentry);
149 spin_unlock(&dcache_lock);
150 dput(dentry);
151 } else {
152 spin_unlock(&dentry->d_lock);
153 spin_unlock(&dcache_lock);
154 }
155 }
156 shrink_dcache_parent(dir);
157 up(&dir->d_inode->i_sem);
158}
159
160static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
161{
162 struct spu_context *ctx;
163
164 /* remove all entries */
165 down(&root->i_sem);
166 spufs_prune_dir(dir_dentry);
167 up(&root->i_sem);
168
169 /* We have to give up the mm_struct */
170 ctx = SPUFS_I(dir_dentry->d_inode)->i_ctx;
171 spu_forget(ctx);
172
173 /* XXX Do we need to hold i_sem here? */
174 return simple_rmdir(root, dir_dentry);
175}
176
177static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
178 int mode, struct spu_context *ctx)
179{
180 struct dentry *dentry;
181 int ret;
182
183 while (files->name && files->name[0]) {
184 ret = -ENOMEM;
185 dentry = d_alloc_name(dir, files->name);
186 if (!dentry)
187 goto out;
188 ret = spufs_new_file(dir->d_sb, dentry, files->ops,
189 files->mode & mode, ctx);
190 if (ret)
191 goto out;
192 files++;
193 }
194 return 0;
195out:
196 spufs_prune_dir(dir);
197 return ret;
198}
199
200static int spufs_dir_close(struct inode *inode, struct file *file)
201{
202 struct inode *dir;
203 struct dentry *dentry;
204 int ret;
205
206 dentry = file->f_dentry;
207 dir = dentry->d_parent->d_inode;
208
209 ret = spufs_rmdir(dir, dentry);
210 WARN_ON(ret);
211
212 return dcache_dir_close(inode, file);
213}
214
215struct inode_operations spufs_dir_inode_operations = {
216 .lookup = simple_lookup,
217};
218
219struct file_operations spufs_context_fops = {
220 .open = dcache_dir_open,
221 .release = spufs_dir_close,
222 .llseek = dcache_dir_lseek,
223 .read = generic_read_dir,
224 .readdir = dcache_readdir,
225 .fsync = simple_sync_file,
226};
227
228static int
229spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
230{
231 int ret;
232 struct inode *inode;
233 struct spu_context *ctx;
234
235 ret = -ENOSPC;
236 inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
237 if (!inode)
238 goto out;
239
240 if (dir->i_mode & S_ISGID) {
241 inode->i_gid = dir->i_gid;
242 inode->i_mode |= S_ISGID;
243 }
244 ctx = alloc_spu_context(inode->i_mapping);
245 SPUFS_I(inode)->i_ctx = ctx;
246 if (!ctx)
247 goto out_iput;
248
249 inode->i_op = &spufs_dir_inode_operations;
250 inode->i_fop = &simple_dir_operations;
251 ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
252 if (ret)
253 goto out_free_ctx;
254
255 d_instantiate(dentry, inode);
256 dget(dentry);
257 dir->i_nlink++;
258 dentry->d_inode->i_nlink++;
259 goto out;
260
261out_free_ctx:
262 put_spu_context(ctx);
263out_iput:
264 iput(inode);
265out:
266 return ret;
267}
268
269static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
270{
271 int ret;
272 struct file *filp;
273
274 ret = get_unused_fd();
275 if (ret < 0) {
276 dput(dentry);
277 mntput(mnt);
278 goto out;
279 }
280
281 filp = dentry_open(dentry, mnt, O_RDONLY);
282 if (IS_ERR(filp)) {
283 put_unused_fd(ret);
284 ret = PTR_ERR(filp);
285 goto out;
286 }
287
288 filp->f_op = &spufs_context_fops;
289 fd_install(ret, filp);
290out:
291 return ret;
292}
293
294static struct file_system_type spufs_type;
295
296long spufs_create_thread(struct nameidata *nd,
297 unsigned int flags, mode_t mode)
298{
299 struct dentry *dentry;
300 int ret;
301
302 /* need to be at the root of spufs */
303 ret = -EINVAL;
304 if (nd->dentry->d_sb->s_type != &spufs_type ||
305 nd->dentry != nd->dentry->d_sb->s_root)
306 goto out;
307
308 dentry = lookup_create(nd, 1);
309 ret = PTR_ERR(dentry);
310 if (IS_ERR(dentry))
311 goto out_dir;
312
313 ret = -EEXIST;
314 if (dentry->d_inode)
315 goto out_dput;
316
317 mode &= ~current->fs->umask;
318 ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO);
319 if (ret)
320 goto out_dput;
321
322 /*
323 * get references for dget and mntget, will be released
324 * in error path of *_open().
325 */
326 ret = spufs_context_open(dget(dentry), mntget(nd->mnt));
327 if (ret < 0)
328 spufs_rmdir(nd->dentry->d_inode, dentry);
329
330out_dput:
331 dput(dentry);
332out_dir:
333 up(&nd->dentry->d_inode->i_sem);
334out:
335 return ret;
336}
337
338/* File system initialization */
339enum {
340 Opt_uid, Opt_gid, Opt_err,
341};
342
343static match_table_t spufs_tokens = {
344 { Opt_uid, "uid=%d" },
345 { Opt_gid, "gid=%d" },
346 { Opt_err, NULL },
347};
348
349static int
350spufs_parse_options(char *options, struct inode *root)
351{
352 char *p;
353 substring_t args[MAX_OPT_ARGS];
354
355 while ((p = strsep(&options, ",")) != NULL) {
356 int token, option;
357
358 if (!*p)
359 continue;
360
361 token = match_token(p, spufs_tokens, args);
362 switch (token) {
363 case Opt_uid:
364 if (match_int(&args[0], &option))
365 return 0;
366 root->i_uid = option;
367 break;
368 case Opt_gid:
369 if (match_int(&args[0], &option))
370 return 0;
371 root->i_gid = option;
372 break;
373 default:
374 return 0;
375 }
376 }
377 return 1;
378}
379
380static int
381spufs_create_root(struct super_block *sb, void *data)
382{
383 struct inode *inode;
384 int ret;
385
386 ret = -ENOMEM;
387 inode = spufs_new_inode(sb, S_IFDIR | 0775);
388 if (!inode)
389 goto out;
390
391 inode->i_op = &spufs_dir_inode_operations;
392 inode->i_fop = &simple_dir_operations;
393 SPUFS_I(inode)->i_ctx = NULL;
394
395 ret = -EINVAL;
396 if (!spufs_parse_options(data, inode))
397 goto out_iput;
398
399 ret = -ENOMEM;
400 sb->s_root = d_alloc_root(inode);
401 if (!sb->s_root)
402 goto out_iput;
403
404 return 0;
405out_iput:
406 iput(inode);
407out:
408 return ret;
409}
410
411static int
412spufs_fill_super(struct super_block *sb, void *data, int silent)
413{
414 static struct super_operations s_ops = {
415 .alloc_inode = spufs_alloc_inode,
416 .destroy_inode = spufs_destroy_inode,
417 .statfs = simple_statfs,
418 .delete_inode = spufs_delete_inode,
419 .drop_inode = generic_delete_inode,
420 };
421
422 sb->s_maxbytes = MAX_LFS_FILESIZE;
423 sb->s_blocksize = PAGE_CACHE_SIZE;
424 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
425 sb->s_magic = SPUFS_MAGIC;
426 sb->s_op = &s_ops;
427
428 return spufs_create_root(sb, data);
429}
430
431static struct super_block *
432spufs_get_sb(struct file_system_type *fstype, int flags,
433 const char *name, void *data)
434{
435 return get_sb_single(fstype, flags, data, spufs_fill_super);
436}
437
438static struct file_system_type spufs_type = {
439 .owner = THIS_MODULE,
440 .name = "spufs",
441 .get_sb = spufs_get_sb,
442 .kill_sb = kill_litter_super,
443};
444
445static int spufs_init(void)
446{
447 int ret;
448 ret = -ENOMEM;
449 spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
450 sizeof(struct spufs_inode_info), 0,
451 SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
452
453 if (!spufs_inode_cache)
454 goto out;
455 if (spu_sched_init() != 0) {
456 kmem_cache_destroy(spufs_inode_cache);
457 goto out;
458 }
459 ret = register_filesystem(&spufs_type);
460 if (ret)
461 goto out_cache;
462 ret = register_spu_syscalls(&spufs_calls);
463 if (ret)
464 goto out_fs;
465 return 0;
466out_fs:
467 unregister_filesystem(&spufs_type);
468out_cache:
469 kmem_cache_destroy(spufs_inode_cache);
470out:
471 return ret;
472}
473module_init(spufs_init);
474
475static void spufs_exit(void)
476{
477 spu_sched_exit();
478 unregister_spu_syscalls(&spufs_calls);
479 unregister_filesystem(&spufs_type);
480 kmem_cache_destroy(spufs_inode_cache);
481}
482module_exit(spufs_exit);
483
484MODULE_LICENSE("GPL");
485MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
486
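
Putting inode.c together: the filesystem must be mounted before spu_create()
can be used, and spufs_create_thread() only accepts pathnames directly under
the spufs root. A user-space sketch; the mount options are illustrative, and
__NR_spu_create is assumed to come from the (not shown) unistd.h update, since
no libc wrapper exists for the new syscall yet:

    #include <sys/mount.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            int ctx;

            /* done once at boot, e.g. from an init script */
            mount("spufs", "/spu", "spufs", 0, "uid=0,gid=0");

            /* reaches the stub in spu_syscalls.c; returns an fd
             * on the freshly created context directory */
            ctx = syscall(__NR_spu_create, "/spu/my_ctx", 0, 0755);

            return ctx < 0;
    }
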
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
new file mode 100644
index 00000000000..18ea8866c61
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -0,0 +1,131 @@
1#include <linux/wait.h>
2#include <linux/ptrace.h>
3
4#include <asm/spu.h>
5
6#include "spufs.h"
7
8/* interrupt-level stop callback function. */
9void spufs_stop_callback(struct spu *spu)
10{
11 struct spu_context *ctx = spu->ctx;
12
13 wake_up_all(&ctx->stop_wq);
14}
15
16static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
17{
18 struct spu *spu;
19 u64 pte_fault;
20
21 *stat = ctx->ops->status_read(ctx);
22 if (ctx->state != SPU_STATE_RUNNABLE)
23 return 1;
24 spu = ctx->spu;
25 pte_fault = spu->dsisr &
26 (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
27 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
28}
29
30static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
31 u32 * status)
32{
33 int ret;
34
35 if ((ret = spu_acquire_runnable(ctx)) != 0)
36 return ret;
37 ctx->ops->npc_write(ctx, *npc);
38 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
39 return 0;
40}
41
42static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
43 u32 * status)
44{
45 int ret = 0;
46
47 *status = ctx->ops->status_read(ctx);
48 *npc = ctx->ops->npc_read(ctx);
49 spu_release(ctx);
50
51 if (signal_pending(current))
52 ret = -ERESTARTSYS;
53 if (unlikely(current->ptrace & PT_PTRACED)) {
54 if ((*status & SPU_STATUS_STOPPED_BY_STOP)
55 && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
56 force_sig(SIGTRAP, current);
57 ret = -ERESTARTSYS;
58 }
59 }
60 return ret;
61}
62
63static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
64 u32 *status)
65{
66 int ret;
67
68 if ((ret = spu_run_fini(ctx, npc, status)) != 0)
69 return ret;
70 if (*status & (SPU_STATUS_STOPPED_BY_STOP |
71 SPU_STATUS_STOPPED_BY_HALT)) {
72 return *status;
73 }
74 if ((ret = spu_run_init(ctx, npc, status)) != 0)
75 return ret;
76 return 0;
77}
78
79static inline int spu_process_events(struct spu_context *ctx)
80{
81 struct spu *spu = ctx->spu;
82 u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
83 int ret = 0;
84
85 if (spu->dsisr & pte_fault)
86 ret = spu_irq_class_1_bottom(spu);
87 if (spu->class_0_pending)
88 ret = spu_irq_class_0_bottom(spu);
89 if (!ret && signal_pending(current))
90 ret = -ERESTARTSYS;
91 return ret;
92}
93
94long spufs_run_spu(struct file *file, struct spu_context *ctx,
95 u32 * npc, u32 * status)
96{
97 int ret;
98
99 if (down_interruptible(&ctx->run_sema))
100 return -ERESTARTSYS;
101
102 ret = spu_run_init(ctx, npc, status);
103 if (ret)
104 goto out;
105
106 do {
107 ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
108 if (unlikely(ret))
109 break;
110 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
111 ret = spu_reacquire_runnable(ctx, npc, status);
112 if (ret)
113 goto out;
114 continue;
115 }
116 ret = spu_process_events(ctx);
117
118 } while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
119 SPU_STATUS_STOPPED_BY_HALT)));
120
121 ctx->ops->runcntl_stop(ctx);
122 ret = spu_run_fini(ctx, npc, status);
123 if (!ret)
124 ret = *status;
125 spu_yield(ctx);
126
127out:
128 up(&ctx->run_sema);
129 return ret;
130}
131
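
spufs_run_spu() above is the kernel half of the run interface: user space passes the entry point in through *npc, the calling thread sleeps on ctx->stop_wq until the SPU stops, and the final status word comes back through *status. A hedged sketch of what a PPE-side caller could look like; spu_create()/spu_run() are assumed user-space wrappers around the new syscalls, and the path, the stop-code handling and the status constants are illustrative assumptions, not part of this patch:

	/* Hypothetical PPE-side driver loop for an spufs context. */
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed wrappers around sys_spu_create/sys_spu_run. */
	extern int spu_create(const char *path, int flags, int mode);
	extern int spu_run(int fd, uint32_t *npc, uint32_t *status);

	#define SPU_STATUS_STOPPED_BY_STOP 0x02 /* assumed to match asm/spu.h */
	#define SPU_STOP_STATUS_SHIFT      16   /* assumed to match asm/spu.h */

	int run_spu_program(void)
	{
		uint32_t npc = 0, status = 0;
		int fd = spu_create("/spu/example", 0, 0755);

		if (fd < 0)
			return -1;
		/* ... load the SPU program image via the context's "mem" file ... */
		do {
			/* Blocks until the SPU stops; npc and status are
			 * updated, mirroring spu_run_fini() above. */
			if (spu_run(fd, &npc, &status) < 0)
				return -1;
			/* A stop code could be decoded here as an upcall
			 * to the PPE before resuming at npc. */
		} while (!(status & SPU_STATUS_STOPPED_BY_STOP));
		return status >> SPU_STOP_STATUS_SHIFT;
	}
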
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
new file mode 100644
index 00000000000..963182fbd1a
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -0,0 +1,461 @@
1/* sched.c - SPU scheduler.
2 *
3 * Copyright (C) IBM 2005
4 * Author: Mark Nutter <mnutter@us.ibm.com>
5 *
6 * SPU scheduler, based on Linux thread priority. For now use
7 * a simple "cooperative" yield model with no preemption. SPU
8 * scheduling will eventually be preemptive: When a thread with
9 * a higher static priority gets ready to run, then an active SPU
10 * context will be preempted and returned to the waitq.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#undef DEBUG
28
29#include <linux/config.h>
30#include <linux/module.h>
31#include <linux/errno.h>
32#include <linux/sched.h>
33#include <linux/kernel.h>
34#include <linux/mm.h>
35#include <linux/completion.h>
36#include <linux/vmalloc.h>
37#include <linux/smp.h>
38#include <linux/smp_lock.h>
39#include <linux/stddef.h>
40#include <linux/unistd.h>
41
42#include <asm/io.h>
43#include <asm/mmu_context.h>
44#include <asm/spu.h>
45#include <asm/spu_csa.h>
46#include "spufs.h"
47
48#define SPU_MIN_TIMESLICE (100 * HZ / 1000)
49
50#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
51struct spu_prio_array {
52 atomic_t nr_blocked;
53 unsigned long bitmap[SPU_BITMAP_SIZE];
54 wait_queue_head_t waitq[MAX_PRIO];
55};
56
57/* spu_runqueue - This is the main runqueue data structure for SPUs. */
58struct spu_runqueue {
59 struct semaphore sem;
60 unsigned long nr_active;
61 unsigned long nr_idle;
62 unsigned long nr_switches;
63 struct list_head active_list;
64 struct list_head idle_list;
65 struct spu_prio_array prio;
66};
67
68static struct spu_runqueue *spu_runqueues = NULL;
69
70static inline struct spu_runqueue *spu_rq(void)
71{
72 /* Future: make this a per-NODE array,
73 * and use cpu_to_node(smp_processor_id())
74 */
75 return spu_runqueues;
76}
77
78static inline struct spu *del_idle(struct spu_runqueue *rq)
79{
80 struct spu *spu;
81
82 BUG_ON(rq->nr_idle <= 0);
83 BUG_ON(list_empty(&rq->idle_list));
84 /* Future: Move SPU out of low-power SRI state. */
85 spu = list_entry(rq->idle_list.next, struct spu, sched_list);
86 list_del_init(&spu->sched_list);
87 rq->nr_idle--;
88 return spu;
89}
90
91static inline void del_active(struct spu_runqueue *rq, struct spu *spu)
92{
93 BUG_ON(rq->nr_active <= 0);
94 BUG_ON(list_empty(&rq->active_list));
95 list_del_init(&spu->sched_list);
96 rq->nr_active--;
97}
98
99static inline void add_idle(struct spu_runqueue *rq, struct spu *spu)
100{
101 /* Future: Put SPU into low-power SRI state. */
102 list_add_tail(&spu->sched_list, &rq->idle_list);
103 rq->nr_idle++;
104}
105
106static inline void add_active(struct spu_runqueue *rq, struct spu *spu)
107{
108 rq->nr_active++;
109 rq->nr_switches++;
110 list_add_tail(&spu->sched_list, &rq->active_list);
111}
112
113static void prio_wakeup(struct spu_runqueue *rq)
114{
115 if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) {
116 int best = sched_find_first_bit(rq->prio.bitmap);
117 if (best < MAX_PRIO) {
118 wait_queue_head_t *wq = &rq->prio.waitq[best];
119 wake_up_interruptible_nr(wq, 1);
120 }
121 }
122}
123
124static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx,
125 u64 flags)
126{
127 int prio = current->prio;
128 wait_queue_head_t *wq = &rq->prio.waitq[prio];
129 DEFINE_WAIT(wait);
130
131 __set_bit(prio, rq->prio.bitmap);
132 atomic_inc(&rq->prio.nr_blocked);
133 prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
134 if (!signal_pending(current)) {
135 up(&rq->sem);
136 up_write(&ctx->state_sema);
137 pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
138 current->pid, current->prio);
139 schedule();
140 down_write(&ctx->state_sema);
141 down(&rq->sem);
142 }
143 finish_wait(wq, &wait);
144 atomic_dec(&rq->prio.nr_blocked);
145 if (!waitqueue_active(wq))
146 __clear_bit(prio, rq->prio.bitmap);
147}
148
149static inline int is_best_prio(struct spu_runqueue *rq)
150{
151 int best_prio;
152
153 best_prio = sched_find_first_bit(rq->prio.bitmap);
154 return (current->prio < best_prio) ? 1 : 0;
155}
156
157static inline void mm_needs_global_tlbie(struct mm_struct *mm)
158{
159 /* Global TLBIE broadcast required with SPEs. */
160#if (NR_CPUS > 1)
161 __cpus_setall(&mm->cpu_vm_mask, NR_CPUS);
162#else
163 __cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */
164#endif
165}
166
167static inline void bind_context(struct spu *spu, struct spu_context *ctx)
168{
169 pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid,
170 spu->number);
171 spu->ctx = ctx;
172 spu->flags = 0;
173 ctx->flags = 0;
174 ctx->spu = spu;
175 ctx->ops = &spu_hw_ops;
176 spu->pid = current->pid;
177 spu->prio = current->prio;
178 spu->mm = ctx->owner;
179 mm_needs_global_tlbie(spu->mm);
180 spu->ibox_callback = spufs_ibox_callback;
181 spu->wbox_callback = spufs_wbox_callback;
182 spu->stop_callback = spufs_stop_callback;
183 mb();
184 spu_unmap_mappings(ctx);
185 spu_restore(&ctx->csa, spu);
186 spu->timestamp = jiffies;
187}
188
189static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
190{
191 pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__,
192 spu->pid, spu->number);
193 spu_unmap_mappings(ctx);
194 spu_save(&ctx->csa, spu);
195 spu->timestamp = jiffies;
196 ctx->state = SPU_STATE_SAVED;
197 spu->ibox_callback = NULL;
198 spu->wbox_callback = NULL;
199 spu->stop_callback = NULL;
200 spu->mm = NULL;
201 spu->pid = 0;
202 spu->prio = MAX_PRIO;
203 ctx->ops = &spu_backing_ops;
204 ctx->spu = NULL;
205 ctx->flags = 0;
206 spu->flags = 0;
207 spu->ctx = NULL;
208}
209
210static void spu_reaper(void *data)
211{
212 struct spu_context *ctx = data;
213 struct spu *spu;
214
215 down_write(&ctx->state_sema);
216 spu = ctx->spu;
217 if (spu && test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
218 if (atomic_read(&spu->rq->prio.nr_blocked)) {
219 pr_debug("%s: spu=%d\n", __func__, spu->number);
220 ctx->ops->runcntl_stop(ctx);
221 spu_deactivate(ctx);
222 wake_up_all(&ctx->stop_wq);
223 } else {
224 clear_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
225 }
226 }
227 up_write(&ctx->state_sema);
228 put_spu_context(ctx);
229}
230
231static void schedule_spu_reaper(struct spu_runqueue *rq, struct spu *spu)
232{
233 struct spu_context *ctx = get_spu_context(spu->ctx);
234 unsigned long now = jiffies;
235 unsigned long expire = spu->timestamp + SPU_MIN_TIMESLICE;
236
237 set_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
238 INIT_WORK(&ctx->reap_work, spu_reaper, ctx);
239 if (time_after(now, expire))
240 schedule_work(&ctx->reap_work);
241 else
242 schedule_delayed_work(&ctx->reap_work, expire - now);
243}
244
245static void check_preempt_active(struct spu_runqueue *rq)
246{
247 struct list_head *p;
248 struct spu *worst = NULL;
249
250 list_for_each(p, &rq->active_list) {
251 struct spu *spu = list_entry(p, struct spu, sched_list);
252 struct spu_context *ctx = spu->ctx;
253 if (!test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
254 if (!worst || (spu->prio > worst->prio)) {
255 worst = spu;
256 }
257 }
258 }
259 if (worst && (current->prio < worst->prio))
260 schedule_spu_reaper(rq, worst);
261}
262
263static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags)
264{
265 struct spu_runqueue *rq;
266 struct spu *spu = NULL;
267
268 rq = spu_rq();
269 down(&rq->sem);
270 for (;;) {
271 if (rq->nr_idle > 0) {
272 if (is_best_prio(rq)) {
273 /* Fall through. */
274 spu = del_idle(rq);
275 break;
276 } else {
277 prio_wakeup(rq);
278 up(&rq->sem);
279 yield();
280 if (signal_pending(current)) {
281 return NULL;
282 }
283 rq = spu_rq();
284 down(&rq->sem);
285 continue;
286 }
287 } else {
288 check_preempt_active(rq);
289 prio_wait(rq, ctx, flags);
290 if (signal_pending(current)) {
291 prio_wakeup(rq);
292 spu = NULL;
293 break;
294 }
295 continue;
296 }
297 }
298 up(&rq->sem);
299 return spu;
300}
301
302static void put_idle_spu(struct spu *spu)
303{
304 struct spu_runqueue *rq = spu->rq;
305
306 down(&rq->sem);
307 add_idle(rq, spu);
308 prio_wakeup(rq);
309 up(&rq->sem);
310}
311
312static int get_active_spu(struct spu *spu)
313{
314 struct spu_runqueue *rq = spu->rq;
315 struct list_head *p;
316 struct spu *tmp;
317 int rc = 0;
318
319 down(&rq->sem);
320 list_for_each(p, &rq->active_list) {
321 tmp = list_entry(p, struct spu, sched_list);
322 if (tmp == spu) {
323 del_active(rq, spu);
324 rc = 1;
325 break;
326 }
327 }
328 up(&rq->sem);
329 return rc;
330}
331
332static void put_active_spu(struct spu *spu)
333{
334 struct spu_runqueue *rq = spu->rq;
335
336 down(&rq->sem);
337 add_active(rq, spu);
338 up(&rq->sem);
339}
340
341/* Lock order:
342 * spu_activate() & spu_deactivate() require the
343 * caller to have down_write(&ctx->state_sema).
344 *
345 * The rq->sem is briefly held (inside or outside a
346 * given ctx lock) for list management, but is never
347 * held during save/restore.
348 */
349
350int spu_activate(struct spu_context *ctx, u64 flags)
351{
352 struct spu *spu;
353
354 if (ctx->spu)
355 return 0;
356 spu = get_idle_spu(ctx, flags);
357 if (!spu)
358 return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
359 bind_context(spu, ctx);
360 /*
361 * We're likely to wait for interrupts on the same
362 * CPU that we are now on, so send them here.
363 */
364 spu_irq_setaffinity(spu, raw_smp_processor_id());
365 put_active_spu(spu);
366 return 0;
367}
368
369void spu_deactivate(struct spu_context *ctx)
370{
371 struct spu *spu;
372 int needs_idle;
373
374 spu = ctx->spu;
375 if (!spu)
376 return;
377 needs_idle = get_active_spu(spu);
378 unbind_context(spu, ctx);
379 if (needs_idle)
380 put_idle_spu(spu);
381}
382
383void spu_yield(struct spu_context *ctx)
384{
385 struct spu *spu;
386 int need_yield = 0;
387
388 down_write(&ctx->state_sema);
389 spu = ctx->spu;
390 if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) {
391 pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number);
392 spu_deactivate(ctx);
393 ctx->state = SPU_STATE_SAVED;
394 need_yield = 1;
395 } else if (spu) {
396 spu->prio = MAX_PRIO;
397 }
398 up_write(&ctx->state_sema);
399 if (unlikely(need_yield))
400 yield();
401}
402
403int __init spu_sched_init(void)
404{
405 struct spu_runqueue *rq;
406 struct spu *spu;
407 int i;
408
409 rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL);
410 if (!rq) {
411 printk(KERN_WARNING "%s: Unable to allocate runqueues.\n",
412 __FUNCTION__);
413 return 1;
414 }
415 memset(rq, 0, sizeof(struct spu_runqueue));
416 init_MUTEX(&rq->sem);
417 INIT_LIST_HEAD(&rq->active_list);
418 INIT_LIST_HEAD(&rq->idle_list);
419 rq->nr_active = 0;
420 rq->nr_idle = 0;
421 rq->nr_switches = 0;
422 atomic_set(&rq->prio.nr_blocked, 0);
423 for (i = 0; i < MAX_PRIO; i++) {
424 init_waitqueue_head(&rq->prio.waitq[i]);
425 __clear_bit(i, rq->prio.bitmap);
426 }
427 __set_bit(MAX_PRIO, rq->prio.bitmap);
428 for (;;) {
429 spu = spu_alloc();
430 if (!spu)
431 break;
432 pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number);
433 add_idle(rq, spu);
434 spu->rq = rq;
435 spu->timestamp = jiffies;
436 }
437 if (!rq->nr_idle) {
438 printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__);
439 kfree(rq);
440 return 1;
441 }
442 return 0;
443}
444
445void __exit spu_sched_exit(void)
446{
447 struct spu_runqueue *rq = spu_rq();
448 struct spu *spu;
449
450 if (!rq) {
451 printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__);
452 return;
453 }
454 while (rq->nr_idle > 0) {
455 spu = del_idle(rq);
456 if (!spu)
457 break;
458 spu_free(spu);
459 }
460 kfree(rq);
461}
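
The prio bitmap managed above is the classic O(1)-scheduler device: one bit per priority level meaning "someone is blocked at this priority", plus a sentinel bit at MAX_PRIO (set in spu_sched_init()) so a scan of an otherwise empty bitmap always terminates. A self-contained sketch of the same mechanics; MAX_PRIO is assumed to match the kernel's value of the era, and find_first_prio() stands in for sched_find_first_bit():

	/* Priority-bitmap sketch: lowest set bit = best blocked priority. */
	#include <stdio.h>

	#define MAX_PRIO      140 /* assumed, matching 2.6-era kernels */
	#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
	#define BITMAP_LONGS  (((MAX_PRIO + BITS_PER_LONG) / BITS_PER_LONG) + 1)

	static unsigned long bitmap[BITMAP_LONGS];

	static void set_prio(int p)   { bitmap[p / BITS_PER_LONG] |=  1UL << (p % BITS_PER_LONG); }
	static void clear_prio(int p) { bitmap[p / BITS_PER_LONG] &= ~(1UL << (p % BITS_PER_LONG)); }

	/* Stand-in for sched_find_first_bit(). */
	static int find_first_prio(void)
	{
		for (int i = 0; i < BITMAP_LONGS; i++)
			if (bitmap[i])
				return i * BITS_PER_LONG + __builtin_ctzl(bitmap[i]);
		return MAX_PRIO;
	}

	int main(void)
	{
		set_prio(MAX_PRIO);                      /* sentinel, as in spu_sched_init() */
		set_prio(120);
		set_prio(100);
		printf("best: %d\n", find_first_prio()); /* 100 */
		clear_prio(100);
		printf("best: %d\n", find_first_prio()); /* 120 */
		clear_prio(120);
		printf("best: %d\n", find_first_prio()); /* 140 == MAX_PRIO: nobody waiting */
		return 0;
	}
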
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c
new file mode 100644
index 00000000000..0bf723dcd67
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c
@@ -0,0 +1,336 @@
1/*
2 * spu_restore.c
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * SPU-side context restore sequence outlined in
7 * Synergistic Processor Element Book IV
8 *
9 * Author: Mark Nutter <mnutter@us.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 */
26
27
28#ifndef LS_SIZE
29#define LS_SIZE 0x40000 /* 256K (in bytes) */
30#endif
31
32typedef unsigned int u32;
33typedef unsigned long long u64;
34
35#include <spu_intrinsics.h>
36#include <asm/spu_csa.h>
37#include "spu_utils.h"
38
39#define BR_INSTR 0x327fff80 /* br -4 */
40#define NOP_INSTR 0x40200000 /* nop */
41#define HEQ_INSTR 0x7b000000 /* heq $0, $0 */
42#define STOP_INSTR 0x00000000 /* stop 0x0 */
43#define ILLEGAL_INSTR 0x00800000 /* illegal instr */
44#define RESTORE_COMPLETE 0x00003ffc /* stop 0x3ffc */
45
46static inline void fetch_regs_from_mem(addr64 lscsa_ea)
47{
48 unsigned int ls = (unsigned int)&regs_spill[0];
49 unsigned int size = sizeof(regs_spill);
50 unsigned int tag_id = 0;
51 unsigned int cmd = 0x40; /* GET */
52
53 spu_writech(MFC_LSA, ls);
54 spu_writech(MFC_EAH, lscsa_ea.ui[0]);
55 spu_writech(MFC_EAL, lscsa_ea.ui[1]);
56 spu_writech(MFC_Size, size);
57 spu_writech(MFC_TagID, tag_id);
58 spu_writech(MFC_Cmd, cmd);
59}
60
61static inline void restore_upper_240kb(addr64 lscsa_ea)
62{
63 unsigned int ls = 16384;
64 unsigned int list = (unsigned int)&dma_list[0];
65 unsigned int size = sizeof(dma_list);
66 unsigned int tag_id = 0;
67 unsigned int cmd = 0x44; /* GETL */
68
69 /* Restore, Step 4:
70 * Enqueue the GETL command (tag 0) to the MFC SPU command
71 * queue to transfer the upper 240 kb of LS from CSA.
72 */
73 spu_writech(MFC_LSA, ls);
74 spu_writech(MFC_EAH, lscsa_ea.ui[0]);
75 spu_writech(MFC_EAL, list);
76 spu_writech(MFC_Size, size);
77 spu_writech(MFC_TagID, tag_id);
78 spu_writech(MFC_Cmd, cmd);
79}
80
81static inline void restore_decr(void)
82{
83 unsigned int offset;
84 unsigned int decr_running;
85 unsigned int decr;
86
87 /* Restore, Step 6:
88 * If the LSCSA "decrementer running" flag is set
89 * then write the SPU_WrDec channel with the
90 * decrementer value from LSCSA.
91 */
92 offset = LSCSA_QW_OFFSET(decr_status);
93 decr_running = regs_spill[offset].slot[0];
94 if (decr_running) {
95 offset = LSCSA_QW_OFFSET(decr);
96 decr = regs_spill[offset].slot[0];
97 spu_writech(SPU_WrDec, decr);
98 }
99}
100
101static inline void write_ppu_mb(void)
102{
103 unsigned int offset;
104 unsigned int data;
105
106 /* Restore, Step 11:
107	 * Write the SPU_WrOutMbox channel with the PPU_MB
108 * data from LSCSA.
109 */
110 offset = LSCSA_QW_OFFSET(ppu_mb);
111 data = regs_spill[offset].slot[0];
112 spu_writech(SPU_WrOutMbox, data);
113}
114
115static inline void write_ppuint_mb(void)
116{
117 unsigned int offset;
118 unsigned int data;
119
120 /* Restore, Step 12:
121	 * Write the SPU_WrOutIntrMbox channel with the PPUINT_MB
122 * data from LSCSA.
123 */
124 offset = LSCSA_QW_OFFSET(ppuint_mb);
125 data = regs_spill[offset].slot[0];
126 spu_writech(SPU_WrOutIntrMbox, data);
127}
128
129static inline void restore_fpcr(void)
130{
131 unsigned int offset;
132 vector unsigned int fpcr;
133
134 /* Restore, Step 13:
135 * Restore the floating-point status and control
136 * register from the LSCSA.
137 */
138 offset = LSCSA_QW_OFFSET(fpcr);
139 fpcr = regs_spill[offset].v;
140 spu_mtfpscr(fpcr);
141}
142
143static inline void restore_srr0(void)
144{
145 unsigned int offset;
146 unsigned int srr0;
147
148 /* Restore, Step 14:
149 * Restore the SPU SRR0 data from the LSCSA.
150 */
151 offset = LSCSA_QW_OFFSET(srr0);
152 srr0 = regs_spill[offset].slot[0];
153 spu_writech(SPU_WrSRR0, srr0);
154}
155
156static inline void restore_event_mask(void)
157{
158 unsigned int offset;
159 unsigned int event_mask;
160
161 /* Restore, Step 15:
162 * Restore the SPU_RdEventMsk data from the LSCSA.
163 */
164 offset = LSCSA_QW_OFFSET(event_mask);
165 event_mask = regs_spill[offset].slot[0];
166 spu_writech(SPU_WrEventMask, event_mask);
167}
168
169static inline void restore_tag_mask(void)
170{
171 unsigned int offset;
172 unsigned int tag_mask;
173
174 /* Restore, Step 16:
175 * Restore the SPU_RdTagMsk data from the LSCSA.
176 */
177 offset = LSCSA_QW_OFFSET(tag_mask);
178 tag_mask = regs_spill[offset].slot[0];
179 spu_writech(MFC_WrTagMask, tag_mask);
180}
181
182static inline void restore_complete(void)
183{
184 extern void exit_fini(void);
185 unsigned int *exit_instrs = (unsigned int *)exit_fini;
186 unsigned int offset;
187 unsigned int stopped_status;
188 unsigned int stopped_code;
189
190 /* Restore, Step 18:
191 * Issue a stop-and-signal instruction with
192 * "good context restore" signal value.
193 *
194 * Restore, Step 19:
195 * There may be additional instructions placed
196 * here by the PPE Sequence for SPU Context
197 * Restore in order to restore the correct
198 * "stopped state".
199 *
200 * This step is handled here by analyzing the
201 * LSCSA.stopped_status and then modifying the
202 * exit() function to behave appropriately.
203 */
204
205 offset = LSCSA_QW_OFFSET(stopped_status);
206 stopped_status = regs_spill[offset].slot[0];
207 stopped_code = regs_spill[offset].slot[1];
208
209 switch (stopped_status) {
210 case SPU_STOPPED_STATUS_P_I:
211 /* SPU_Status[P,I]=1. Add illegal instruction
212 * followed by stop-and-signal instruction after
213 * end of restore code.
214 */
215 exit_instrs[0] = RESTORE_COMPLETE;
216 exit_instrs[1] = ILLEGAL_INSTR;
217 exit_instrs[2] = STOP_INSTR | stopped_code;
218 break;
219 case SPU_STOPPED_STATUS_P_H:
220 /* SPU_Status[P,H]=1. Add 'heq $0, $0' followed
221 * by stop-and-signal instruction after end of
222 * restore code.
223 */
224 exit_instrs[0] = RESTORE_COMPLETE;
225 exit_instrs[1] = HEQ_INSTR;
226 exit_instrs[2] = STOP_INSTR | stopped_code;
227 break;
228 case SPU_STOPPED_STATUS_S_P:
229	/* SPU_Status[S,P]=1. Add stop-and-signal instruction
230	 * followed by nop and 'br -4' after end of restore
231	 * code.
232 */
233 exit_instrs[0] = RESTORE_COMPLETE;
234 exit_instrs[1] = STOP_INSTR | stopped_code;
235 exit_instrs[2] = NOP_INSTR;
236 exit_instrs[3] = BR_INSTR;
237 break;
238 case SPU_STOPPED_STATUS_S_I:
239 /* SPU_Status[S,I]=1. Add illegal instruction
240 * followed by 'br -4' after end of restore code.
241 */
242 exit_instrs[0] = RESTORE_COMPLETE;
243 exit_instrs[1] = ILLEGAL_INSTR;
244 exit_instrs[2] = NOP_INSTR;
245 exit_instrs[3] = BR_INSTR;
246 break;
247 case SPU_STOPPED_STATUS_I:
248 /* SPU_Status[I]=1. Add illegal instruction followed
249 * by infinite loop after end of restore sequence.
250 */
251 exit_instrs[0] = RESTORE_COMPLETE;
252 exit_instrs[1] = ILLEGAL_INSTR;
253 exit_instrs[2] = NOP_INSTR;
254 exit_instrs[3] = BR_INSTR;
255 break;
256 case SPU_STOPPED_STATUS_S:
257	/* SPU_Status[S]=1. Add two 'nop' instructions and 'br -4'. */
258 exit_instrs[0] = RESTORE_COMPLETE;
259 exit_instrs[1] = NOP_INSTR;
260 exit_instrs[2] = NOP_INSTR;
261 exit_instrs[3] = BR_INSTR;
262 break;
263 case SPU_STOPPED_STATUS_H:
264 /* SPU_Status[H]=1. Add 'heq $0, $0' instruction
265 * after end of restore code.
266 */
267 exit_instrs[0] = RESTORE_COMPLETE;
268 exit_instrs[1] = HEQ_INSTR;
269 exit_instrs[2] = NOP_INSTR;
270 exit_instrs[3] = BR_INSTR;
271 break;
272 case SPU_STOPPED_STATUS_P:
273 /* SPU_Status[P]=1. Add stop-and-signal instruction
274 * after end of restore code.
275 */
276 exit_instrs[0] = RESTORE_COMPLETE;
277 exit_instrs[1] = STOP_INSTR | stopped_code;
278 break;
279 case SPU_STOPPED_STATUS_R:
280 /* SPU_Status[I,S,H,P,R]=0. Add infinite loop. */
281 exit_instrs[0] = RESTORE_COMPLETE;
282 exit_instrs[1] = NOP_INSTR;
283 exit_instrs[2] = NOP_INSTR;
284 exit_instrs[3] = BR_INSTR;
285 break;
286 default:
287	/* SPU_Status[R]=1. No additional instructions. */
288 break;
289 }
290 spu_sync();
291}
292
293/**
294 * main - entry point for SPU-side context restore.
295 *
296 * This code deviates from the documented sequence in the
297 * following aspects:
298 *
299 * 1. The EA for LSCSA is passed from PPE in the
300 * signal notification channels.
301 * 2. The register spill area is pulled by SPU
302 * into LS, rather than pushed by PPE.
303 * 3. All 128 registers are restored by exit().
304 * 4. The exit() function is modified at run
305 * time in order to properly restore the
306 * SPU_Status register.
307 */
308int main()
309{
310 addr64 lscsa_ea;
311
312 lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1);
313 lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2);
314 fetch_regs_from_mem(lscsa_ea);
315
316 set_event_mask(); /* Step 1. */
317 set_tag_mask(); /* Step 2. */
318 build_dma_list(lscsa_ea); /* Step 3. */
319 restore_upper_240kb(lscsa_ea); /* Step 4. */
320 /* Step 5: done by 'exit'. */
321 restore_decr(); /* Step 6. */
322 enqueue_putllc(lscsa_ea); /* Step 7. */
323 set_tag_update(); /* Step 8. */
324 read_tag_status(); /* Step 9. */
325 read_llar_status(); /* Step 10. */
326 write_ppu_mb(); /* Step 11. */
327 write_ppuint_mb(); /* Step 12. */
328 restore_fpcr(); /* Step 13. */
329 restore_srr0(); /* Step 14. */
330 restore_event_mask(); /* Step 15. */
331 restore_tag_mask(); /* Step 16. */
332 /* Step 17. done by 'exit'. */
333 restore_complete(); /* Step 18. */
334
335 return 0;
336}
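
restore_complete() works by patching code, not by branching: the words of exit_fini are rewritten so that, after the 'stop 0x3ffc' handshake, the SPU re-executes an instruction of the same class that originally stopped it, recreating the exact SPU_Status bits. The sketch below only reproduces the word-level arithmetic for the SPU_Status[P]=1 case, using the opcode #defines from the top of this file; the example stop code is made up:

	/* Word arithmetic behind the exit_instrs patching above. */
	#include <stdio.h>

	#define STOP_INSTR       0x00000000 /* stop 0x0, code in low bits */
	#define RESTORE_COMPLETE 0x00003ffc /* stop 0x3ffc */

	int main(void)
	{
		unsigned int stopped_code = 0x2001; /* illustrative code from LSCSA */
		unsigned int exit_instrs[2];

		/* SPU_Status[P]=1: signal "restore done", then replay the
		 * application's own stop-and-signal with its code. */
		exit_instrs[0] = RESTORE_COMPLETE;
		exit_instrs[1] = STOP_INSTR | stopped_code;

		printf("%08x %08x\n", exit_instrs[0], exit_instrs[1]);
		/* prints: 00003ffc 00002001 */
		return 0;
	}
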
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S b/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S
new file mode 100644
index 00000000000..2905949debe
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S
@@ -0,0 +1,116 @@
1/*
2 * crt0_r.S: Entry function for SPU-side context restore.
3 *
4 * Copyright (C) 2005 IBM
5 *
6 * Entry and exit function for SPU-side of the context restore
7 * sequence. Sets up an initial stack frame, then branches to
8 * 'main'. On return, restores all 128 registers from the LSCSA
9 * and exits.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/spu_csa.h>
28
29.data
30.align 7
31.globl regs_spill
32regs_spill:
33.space SIZEOF_SPU_SPILL_REGS, 0x0
34
35.text
36.global _start
37_start:
38 /* Initialize the stack pointer to point to 16368
39 * (16kb-16). The back chain pointer is initialized
40 * to NULL.
41 */
42 il $0, 0
43 il $SP, 16368
44 stqd $0, 0($SP)
45
46 /* Allocate a minimum stack frame for the called main.
47 * This is needed so that main has a place to save the
48 * link register when it calls another function.
49 */
50 stqd $SP, -160($SP)
51 ai $SP, $SP, -160
52
53 /* Call the program's main function. */
54 brsl $0, main
55
56.global exit
57.global _exit
58exit:
59_exit:
60 /* SPU Context Restore, Step 5: Restore the remaining 112 GPRs. */
61 ila $3, regs_spill + 256
62restore_regs:
63 lqr $4, restore_reg_insts
64restore_reg_loop:
65 ai $4, $4, 4
66 .balignl 16, 0x40200000
67restore_reg_insts: /* must be quad-word aligned. */
68 lqd $16, 0($3)
69 lqd $17, 16($3)
70 lqd $18, 32($3)
71 lqd $19, 48($3)
72 andi $5, $4, 0x7F
73 stqr $4, restore_reg_insts
74 ai $3, $3, 64
75 brnz $5, restore_reg_loop
76
77 /* SPU Context Restore Step 17: Restore the first 16 GPRs. */
78 lqa $0, regs_spill + 0
79 lqa $1, regs_spill + 16
80 lqa $2, regs_spill + 32
81 lqa $3, regs_spill + 48
82 lqa $4, regs_spill + 64
83 lqa $5, regs_spill + 80
84 lqa $6, regs_spill + 96
85 lqa $7, regs_spill + 112
86 lqa $8, regs_spill + 128
87 lqa $9, regs_spill + 144
88 lqa $10, regs_spill + 160
89 lqa $11, regs_spill + 176
90 lqa $12, regs_spill + 192
91 lqa $13, regs_spill + 208
92 lqa $14, regs_spill + 224
93 lqa $15, regs_spill + 240
94
95 /* Under normal circumstances, the 'exit' function
96 * terminates with 'stop SPU_RESTORE_COMPLETE',
97 * indicating that the SPU-side restore code has
98 * completed.
99 *
100 * However, it is possible that instructions immediately
101 * following the 'stop 0x3ffc' have been modified at run
102 * time so as to recreate the exact SPU_Status settings
103 * from the application, e.g. illegal instruction, halt,
104 * etc.
105 */
106.global exit_fini
107.global _exit_fini
108exit_fini:
109_exit_fini:
110 stop SPU_RESTORE_COMPLETE
111 stop 0
112 stop 0
113 stop 0
114
115	/* Pad the size of this crt0.o to be a multiple of 16 bytes. */
116.balignl 16, 0x0
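
The restore_reg_loop above is self-modifying: lqr pulls the quadword holding the four lqd instructions into $4, 'ai $4, $4, 4' adds 4 to each 32-bit instruction word (SPU ai operates per word slot), and stqr writes them back, so on every pass the four target-register fields advance by four registers. The 'andi $5, $4, 0x7F' test fires once the field wraps past $127. A tiny sketch of that arithmetic, tracking a single register field as an integer; treating the low 7 bits of the word as the target register is an assumption read off the lqd encoding and the 0x7F mask:

	/* Model of the self-modifying register loop. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int rt = 16; /* first register restored by the loop */
		int passes = 0;

		do {
			rt += 4;      /* the 'ai $4, $4, 4' step */
			passes++;
		} while (rt & 0x7F);  /* 'andi $5, $4, 0x7F; brnz ...' */

		printf("%d passes x 4 regs = %d registers ($16..$127)\n",
		       passes, passes * 4); /* 28 passes, 112 registers */
		return 0;
	}
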
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
new file mode 100644
index 00000000000..1b2355ff703
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
@@ -0,0 +1,231 @@
1/*
2 * spu_restore_dump.h: Copyright (C) 2005 IBM.
3 * Hex-dump auto generated from spu_restore.c.
4 * Do not edit!
5 */
6static unsigned int spu_restore_code[] __page_aligned = {
70x40800000, 0x409ff801, 0x24000080, 0x24fd8081,
80x1cd80081, 0x33001180, 0x42030003, 0x33800284,
90x1c010204, 0x40200000, 0x40200000, 0x40200000,
100x34000190, 0x34004191, 0x34008192, 0x3400c193,
110x141fc205, 0x23fffd84, 0x1c100183, 0x217ffa85,
120x3080a000, 0x3080a201, 0x3080a402, 0x3080a603,
130x3080a804, 0x3080aa05, 0x3080ac06, 0x3080ae07,
140x3080b008, 0x3080b209, 0x3080b40a, 0x3080b60b,
150x3080b80c, 0x3080ba0d, 0x3080bc0e, 0x3080be0f,
160x00003ffc, 0x00000000, 0x00000000, 0x00000000,
170x01a00182, 0x3ec00083, 0xb0a14103, 0x01a00204,
180x3ec10082, 0x4202800e, 0x04000703, 0xb0a14202,
190x21a00803, 0x3fbf028d, 0x3f20068d, 0x3fbe0682,
200x3fe30102, 0x21a00882, 0x3f82028f, 0x3fe3078f,
210x3fbf0784, 0x3f200204, 0x3fbe0204, 0x3fe30204,
220x04000203, 0x21a00903, 0x40848002, 0x21a00982,
230x40800003, 0x21a00a03, 0x40802002, 0x21a00a82,
240x21a00083, 0x40800082, 0x21a00b02, 0x10002818,
250x40a80002, 0x32800007, 0x4207000c, 0x18008208,
260x40a0000b, 0x4080020a, 0x40800709, 0x00200000,
270x42070002, 0x3ac30384, 0x1cffc489, 0x00200000,
280x18008383, 0x38830382, 0x4cffc486, 0x3ac28185,
290xb0408584, 0x28830382, 0x1c020387, 0x38828182,
300xb0408405, 0x1802c408, 0x28828182, 0x217ff886,
310x04000583, 0x21a00803, 0x3fbe0682, 0x3fe30102,
320x04000106, 0x21a00886, 0x04000603, 0x21a00903,
330x40803c02, 0x21a00982, 0x40800003, 0x04000184,
340x21a00a04, 0x40802202, 0x21a00a82, 0x42028005,
350x34208702, 0x21002282, 0x21a00804, 0x21a00886,
360x3fbf0782, 0x3f200102, 0x3fbe0102, 0x3fe30102,
370x21a00902, 0x40804003, 0x21a00983, 0x21a00a04,
380x40805a02, 0x21a00a82, 0x40800083, 0x21a00b83,
390x01a00c02, 0x01a00d83, 0x3420c282, 0x21a00e02,
400x34210283, 0x21a00f03, 0x34200284, 0x77400200,
410x3421c282, 0x21a00702, 0x34218283, 0x21a00083,
420x34214282, 0x21a00b02, 0x4200480c, 0x00200000,
430x1c010286, 0x34220284, 0x34220302, 0x0f608203,
440x5c024204, 0x3b81810b, 0x42013c02, 0x00200000,
450x18008185, 0x38808183, 0x3b814182, 0x21004e84,
460x4020007f, 0x35000100, 0x000004e0, 0x000002a0,
470x000002e8, 0x00000428, 0x00000360, 0x000002e8,
480x000004a0, 0x00000468, 0x000003c8, 0x00000360,
490x409ffe02, 0x30801203, 0x40800204, 0x3ec40085,
500x10009c09, 0x3ac10606, 0xb060c105, 0x4020007f,
510x4020007f, 0x20801203, 0x38810602, 0xb0408586,
520x28810602, 0x32004180, 0x34204702, 0x21a00382,
530x4020007f, 0x327fdc80, 0x409ffe02, 0x30801203,
540x40800204, 0x3ec40087, 0x40800405, 0x00200000,
550x40800606, 0x3ac10608, 0x3ac14609, 0x3ac1860a,
560xb060c107, 0x20801203, 0x41004003, 0x38810602,
570x4020007f, 0xb0408188, 0x4020007f, 0x28810602,
580x41201002, 0x38814603, 0x10009c09, 0xb060c109,
590x4020007f, 0x28814603, 0x41193f83, 0x38818602,
600x60ffc003, 0xb040818a, 0x28818602, 0x32003080,
610x409ffe02, 0x30801203, 0x40800204, 0x3ec40087,
620x41201008, 0x10009c14, 0x40800405, 0x3ac10609,
630x40800606, 0x3ac1460a, 0xb060c107, 0x3ac1860b,
640x20801203, 0x38810602, 0xb0408409, 0x28810602,
650x38814603, 0xb060c40a, 0x4020007f, 0x28814603,
660x41193f83, 0x38818602, 0x60ffc003, 0xb040818b,
670x28818602, 0x32002380, 0x409ffe02, 0x30801204,
680x40800205, 0x3ec40083, 0x40800406, 0x3ac14607,
690x3ac18608, 0xb0810103, 0x41004002, 0x20801204,
700x4020007f, 0x38814603, 0x10009c0b, 0xb060c107,
710x4020007f, 0x4020007f, 0x28814603, 0x38818602,
720x4020007f, 0x4020007f, 0xb0408588, 0x28818602,
730x4020007f, 0x32001780, 0x409ffe02, 0x1000640e,
740x40800204, 0x30801203, 0x40800405, 0x3ec40087,
750x40800606, 0x3ac10608, 0x3ac14609, 0x3ac1860a,
760xb060c107, 0x20801203, 0x413d8003, 0x38810602,
770x4020007f, 0x327fd780, 0x409ffe02, 0x10007f0c,
780x40800205, 0x30801204, 0x40800406, 0x3ec40083,
790x3ac14607, 0x3ac18608, 0xb0810103, 0x413d8002,
800x20801204, 0x38814603, 0x4020007f, 0x327feb80,
810x409ffe02, 0x30801203, 0x40800204, 0x3ec40087,
820x40800405, 0x1000650a, 0x40800606, 0x3ac10608,
830x3ac14609, 0x3ac1860a, 0xb060c107, 0x20801203,
840x38810602, 0xb0408588, 0x4020007f, 0x327fc980,
850x00400000, 0x40800003, 0x4020007f, 0x35000000,
860x00000000, 0x00000000, 0x00000000, 0x00000000,
870x00000000, 0x00000000, 0x00000000, 0x00000000,
880x00000000, 0x00000000, 0x00000000, 0x00000000,
890x00000000, 0x00000000, 0x00000000, 0x00000000,
900x00000000, 0x00000000, 0x00000000, 0x00000000,
910x00000000, 0x00000000, 0x00000000, 0x00000000,
920x00000000, 0x00000000, 0x00000000, 0x00000000,
930x00000000, 0x00000000, 0x00000000, 0x00000000,
940x00000000, 0x00000000, 0x00000000, 0x00000000,
950x00000000, 0x00000000, 0x00000000, 0x00000000,
960x00000000, 0x00000000, 0x00000000, 0x00000000,
970x00000000, 0x00000000, 0x00000000, 0x00000000,
980x00000000, 0x00000000, 0x00000000, 0x00000000,
990x00000000, 0x00000000, 0x00000000, 0x00000000,
1000x00000000, 0x00000000, 0x00000000, 0x00000000,
1010x00000000, 0x00000000, 0x00000000, 0x00000000,
1020x00000000, 0x00000000, 0x00000000, 0x00000000,
1030x00000000, 0x00000000, 0x00000000, 0x00000000,
1040x00000000, 0x00000000, 0x00000000, 0x00000000,
1050x00000000, 0x00000000, 0x00000000, 0x00000000,
1060x00000000, 0x00000000, 0x00000000, 0x00000000,
1070x00000000, 0x00000000, 0x00000000, 0x00000000,
1080x00000000, 0x00000000, 0x00000000, 0x00000000,
1090x00000000, 0x00000000, 0x00000000, 0x00000000,
1100x00000000, 0x00000000, 0x00000000, 0x00000000,
1110x00000000, 0x00000000, 0x00000000, 0x00000000,
1120x00000000, 0x00000000, 0x00000000, 0x00000000,
1130x00000000, 0x00000000, 0x00000000, 0x00000000,
1140x00000000, 0x00000000, 0x00000000, 0x00000000,
1150x00000000, 0x00000000, 0x00000000, 0x00000000,
1160x00000000, 0x00000000, 0x00000000, 0x00000000,
1170x00000000, 0x00000000, 0x00000000, 0x00000000,
1180x00000000, 0x00000000, 0x00000000, 0x00000000,
1190x00000000, 0x00000000, 0x00000000, 0x00000000,
1200x00000000, 0x00000000, 0x00000000, 0x00000000,
1210x00000000, 0x00000000, 0x00000000, 0x00000000,
1220x00000000, 0x00000000, 0x00000000, 0x00000000,
1230x00000000, 0x00000000, 0x00000000, 0x00000000,
1240x00000000, 0x00000000, 0x00000000, 0x00000000,
1250x00000000, 0x00000000, 0x00000000, 0x00000000,
1260x00000000, 0x00000000, 0x00000000, 0x00000000,
1270x00000000, 0x00000000, 0x00000000, 0x00000000,
1280x00000000, 0x00000000, 0x00000000, 0x00000000,
1290x00000000, 0x00000000, 0x00000000, 0x00000000,
1300x00000000, 0x00000000, 0x00000000, 0x00000000,
1310x00000000, 0x00000000, 0x00000000, 0x00000000,
1320x00000000, 0x00000000, 0x00000000, 0x00000000,
1330x00000000, 0x00000000, 0x00000000, 0x00000000,
1340x00000000, 0x00000000, 0x00000000, 0x00000000,
1350x00000000, 0x00000000, 0x00000000, 0x00000000,
1360x00000000, 0x00000000, 0x00000000, 0x00000000,
1370x00000000, 0x00000000, 0x00000000, 0x00000000,
1380x00000000, 0x00000000, 0x00000000, 0x00000000,
1390x00000000, 0x00000000, 0x00000000, 0x00000000,
1400x00000000, 0x00000000, 0x00000000, 0x00000000,
1410x00000000, 0x00000000, 0x00000000, 0x00000000,
1420x00000000, 0x00000000, 0x00000000, 0x00000000,
1430x00000000, 0x00000000, 0x00000000, 0x00000000,
1440x00000000, 0x00000000, 0x00000000, 0x00000000,
1450x00000000, 0x00000000, 0x00000000, 0x00000000,
1460x00000000, 0x00000000, 0x00000000, 0x00000000,
1470x00000000, 0x00000000, 0x00000000, 0x00000000,
1480x00000000, 0x00000000, 0x00000000, 0x00000000,
1490x00000000, 0x00000000, 0x00000000, 0x00000000,
1500x00000000, 0x00000000, 0x00000000, 0x00000000,
1510x00000000, 0x00000000, 0x00000000, 0x00000000,
1520x00000000, 0x00000000, 0x00000000, 0x00000000,
1530x00000000, 0x00000000, 0x00000000, 0x00000000,
1540x00000000, 0x00000000, 0x00000000, 0x00000000,
1550x00000000, 0x00000000, 0x00000000, 0x00000000,
1560x00000000, 0x00000000, 0x00000000, 0x00000000,
1570x00000000, 0x00000000, 0x00000000, 0x00000000,
1580x00000000, 0x00000000, 0x00000000, 0x00000000,
1590x00000000, 0x00000000, 0x00000000, 0x00000000,
1600x00000000, 0x00000000, 0x00000000, 0x00000000,
1610x00000000, 0x00000000, 0x00000000, 0x00000000,
1620x00000000, 0x00000000, 0x00000000, 0x00000000,
1630x00000000, 0x00000000, 0x00000000, 0x00000000,
1640x00000000, 0x00000000, 0x00000000, 0x00000000,
1650x00000000, 0x00000000, 0x00000000, 0x00000000,
1660x00000000, 0x00000000, 0x00000000, 0x00000000,
1670x00000000, 0x00000000, 0x00000000, 0x00000000,
1680x00000000, 0x00000000, 0x00000000, 0x00000000,
1690x00000000, 0x00000000, 0x00000000, 0x00000000,
1700x00000000, 0x00000000, 0x00000000, 0x00000000,
1710x00000000, 0x00000000, 0x00000000, 0x00000000,
1720x00000000, 0x00000000, 0x00000000, 0x00000000,
1730x00000000, 0x00000000, 0x00000000, 0x00000000,
1740x00000000, 0x00000000, 0x00000000, 0x00000000,
1750x00000000, 0x00000000, 0x00000000, 0x00000000,
1760x00000000, 0x00000000, 0x00000000, 0x00000000,
1770x00000000, 0x00000000, 0x00000000, 0x00000000,
1780x00000000, 0x00000000, 0x00000000, 0x00000000,
1790x00000000, 0x00000000, 0x00000000, 0x00000000,
1800x00000000, 0x00000000, 0x00000000, 0x00000000,
1810x00000000, 0x00000000, 0x00000000, 0x00000000,
1820x00000000, 0x00000000, 0x00000000, 0x00000000,
1830x00000000, 0x00000000, 0x00000000, 0x00000000,
1840x00000000, 0x00000000, 0x00000000, 0x00000000,
1850x00000000, 0x00000000, 0x00000000, 0x00000000,
1860x00000000, 0x00000000, 0x00000000, 0x00000000,
1870x00000000, 0x00000000, 0x00000000, 0x00000000,
1880x00000000, 0x00000000, 0x00000000, 0x00000000,
1890x00000000, 0x00000000, 0x00000000, 0x00000000,
1900x00000000, 0x00000000, 0x00000000, 0x00000000,
1910x00000000, 0x00000000, 0x00000000, 0x00000000,
1920x00000000, 0x00000000, 0x00000000, 0x00000000,
1930x00000000, 0x00000000, 0x00000000, 0x00000000,
1940x00000000, 0x00000000, 0x00000000, 0x00000000,
1950x00000000, 0x00000000, 0x00000000, 0x00000000,
1960x00000000, 0x00000000, 0x00000000, 0x00000000,
1970x00000000, 0x00000000, 0x00000000, 0x00000000,
1980x00000000, 0x00000000, 0x00000000, 0x00000000,
1990x00000000, 0x00000000, 0x00000000, 0x00000000,
2000x00000000, 0x00000000, 0x00000000, 0x00000000,
2010x00000000, 0x00000000, 0x00000000, 0x00000000,
2020x00000000, 0x00000000, 0x00000000, 0x00000000,
2030x00000000, 0x00000000, 0x00000000, 0x00000000,
2040x00000000, 0x00000000, 0x00000000, 0x00000000,
2050x00000000, 0x00000000, 0x00000000, 0x00000000,
2060x00000000, 0x00000000, 0x00000000, 0x00000000,
2070x00000000, 0x00000000, 0x00000000, 0x00000000,
2080x00000000, 0x00000000, 0x00000000, 0x00000000,
2090x00000000, 0x00000000, 0x00000000, 0x00000000,
2100x00000000, 0x00000000, 0x00000000, 0x00000000,
2110x00000000, 0x00000000, 0x00000000, 0x00000000,
2120x00000000, 0x00000000, 0x00000000, 0x00000000,
2130x00000000, 0x00000000, 0x00000000, 0x00000000,
2140x00000000, 0x00000000, 0x00000000, 0x00000000,
2150x00000000, 0x00000000, 0x00000000, 0x00000000,
2160x00000000, 0x00000000, 0x00000000, 0x00000000,
2170x00000000, 0x00000000, 0x00000000, 0x00000000,
2180x00000000, 0x00000000, 0x00000000, 0x00000000,
2190x00000000, 0x00000000, 0x00000000, 0x00000000,
2200x00000000, 0x00000000, 0x00000000, 0x00000000,
2210x00000000, 0x00000000, 0x00000000, 0x00000000,
2220x00000000, 0x00000000, 0x00000000, 0x00000000,
2230x00000000, 0x00000000, 0x00000000, 0x00000000,
2240x00000000, 0x00000000, 0x00000000, 0x00000000,
2250x00000000, 0x00000000, 0x00000000, 0x00000000,
2260x00000000, 0x00000000, 0x00000000, 0x00000000,
2270x00000000, 0x00000000, 0x00000000, 0x00000000,
2280x00000000, 0x00000000, 0x00000000, 0x00000000,
2290x00000000, 0x00000000, 0x00000000, 0x00000000,
2300x00000000, 0x00000000, 0x00000000, 0x00000000,
231};
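
This _shipped header is a generated artifact: the restore program is built with an SPU cross-toolchain and its image is dumped into a plain C array so kernels can be compiled without spu-gcc installed. A converter along the lines of the sketch below is assumed; the real build uses its own dump helper, so the tool shape and output framing here are illustrative only:

	/* Hypothetical image-to-header converter sketch. */
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		unsigned int word;
		FILE *in;
		int col = 0;

		if (argc < 2 || !(in = fopen(argv[1], "rb")))
			return 1;
		printf("static unsigned int spu_restore_code[] __page_aligned = {\n");
		/* Note: a real tool would byte-swap on a little-endian
		 * host, since the SPU image is big-endian. */
		while (fread(&word, sizeof(word), 1, in) == 1)
			printf("0x%08x,%s", word, (++col % 4) ? " " : "\n");
		printf("};\n");
		fclose(in);
		return 0;
	}
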
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save.c b/arch/powerpc/platforms/cell/spufs/spu_save.c
new file mode 100644
index 00000000000..196033b8a57
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_save.c
@@ -0,0 +1,195 @@
1/*
2 * spu_save.c
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * SPU-side context save sequence outlined in
7 * Synergistic Processor Element Book IV
8 *
9 * Author: Mark Nutter <mnutter@us.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 */
26
27
28#ifndef LS_SIZE
29#define LS_SIZE 0x40000 /* 256K (in bytes) */
30#endif
31
32typedef unsigned int u32;
33typedef unsigned long long u64;
34
35#include <spu_intrinsics.h>
36#include <asm/spu_csa.h>
37#include "spu_utils.h"
38
39static inline void save_event_mask(void)
40{
41 unsigned int offset;
42
43 /* Save, Step 2:
44 * Read the SPU_RdEventMsk channel and save to the LSCSA.
45 */
46 offset = LSCSA_QW_OFFSET(event_mask);
47 regs_spill[offset].slot[0] = spu_readch(SPU_RdEventStatMask);
48}
49
50static inline void save_tag_mask(void)
51{
52 unsigned int offset;
53
54 /* Save, Step 3:
55 * Read the SPU_RdTagMsk channel and save to the LSCSA.
56 */
57 offset = LSCSA_QW_OFFSET(tag_mask);
58 regs_spill[offset].slot[0] = spu_readch(MFC_RdTagMask);
59}
60
61static inline void save_upper_240kb(addr64 lscsa_ea)
62{
63 unsigned int ls = 16384;
64 unsigned int list = (unsigned int)&dma_list[0];
65 unsigned int size = sizeof(dma_list);
66 unsigned int tag_id = 0;
67 unsigned int cmd = 0x24; /* PUTL */
68
69 /* Save, Step 7:
70 * Enqueue the PUTL command (tag 0) to the MFC SPU command
71 * queue to transfer the remaining 240 kb of LS to CSA.
72 */
73 spu_writech(MFC_LSA, ls);
74 spu_writech(MFC_EAH, lscsa_ea.ui[0]);
75 spu_writech(MFC_EAL, list);
76 spu_writech(MFC_Size, size);
77 spu_writech(MFC_TagID, tag_id);
78 spu_writech(MFC_Cmd, cmd);
79}
80
81static inline void save_fpcr(void)
82{
83 // vector unsigned int fpcr;
84 unsigned int offset;
85
86 /* Save, Step 9:
87 * Issue the floating-point status and control register
88 * read instruction, and save to the LSCSA.
89 */
90 offset = LSCSA_QW_OFFSET(fpcr);
91 regs_spill[offset].v = spu_mffpscr();
92}
93
94static inline void save_decr(void)
95{
96 unsigned int offset;
97
98 /* Save, Step 10:
99 * Read and save the SPU_RdDec channel data to
100 * the LSCSA.
101 */
102 offset = LSCSA_QW_OFFSET(decr);
103 regs_spill[offset].slot[0] = spu_readch(SPU_RdDec);
104}
105
106static inline void save_srr0(void)
107{
108 unsigned int offset;
109
110 /* Save, Step 11:
111	 * Read and save the SPU_RdSRR0 channel data to
112 * the LSCSA.
113 */
114 offset = LSCSA_QW_OFFSET(srr0);
115 regs_spill[offset].slot[0] = spu_readch(SPU_RdSRR0);
116}
117
118static inline void spill_regs_to_mem(addr64 lscsa_ea)
119{
120 unsigned int ls = (unsigned int)&regs_spill[0];
121 unsigned int size = sizeof(regs_spill);
122 unsigned int tag_id = 0;
123 unsigned int cmd = 0x20; /* PUT */
124
125 /* Save, Step 13:
126 * Enqueue a PUT command (tag 0) to send the LSCSA
127 * to the CSA.
128 */
129 spu_writech(MFC_LSA, ls);
130 spu_writech(MFC_EAH, lscsa_ea.ui[0]);
131 spu_writech(MFC_EAL, lscsa_ea.ui[1]);
132 spu_writech(MFC_Size, size);
133 spu_writech(MFC_TagID, tag_id);
134 spu_writech(MFC_Cmd, cmd);
135}
136
137static inline void enqueue_sync(addr64 lscsa_ea)
138{
139 unsigned int tag_id = 0;
140 unsigned int cmd = 0xCC;
141
142 /* Save, Step 14:
143 * Enqueue an MFC_SYNC command (tag 0).
144 */
145 spu_writech(MFC_TagID, tag_id);
146 spu_writech(MFC_Cmd, cmd);
147}
148
149static inline void save_complete(void)
150{
151 /* Save, Step 18:
152 * Issue a stop-and-signal instruction indicating
153 * "save complete". Note: This function will not
154 * return!!
155 */
156 spu_stop(SPU_SAVE_COMPLETE);
157}
158
159/**
160 * main - entry point for SPU-side context save.
161 *
162 * This code deviates from the documented sequence as follows:
163 *
164 * 1. The EA for LSCSA is passed from PPE in the
165 * signal notification channels.
166 * 2. All 128 registers are saved by crt0.o.
167 */
168int main()
169{
170 addr64 lscsa_ea;
171
172 lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1);
173 lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2);
174
175 /* Step 1: done by exit(). */
176 save_event_mask(); /* Step 2. */
177 save_tag_mask(); /* Step 3. */
178 set_event_mask(); /* Step 4. */
179 set_tag_mask(); /* Step 5. */
180 build_dma_list(lscsa_ea); /* Step 6. */
181 save_upper_240kb(lscsa_ea); /* Step 7. */
182 /* Step 8: done by exit(). */
183 save_fpcr(); /* Step 9. */
184 save_decr(); /* Step 10. */
185 save_srr0(); /* Step 11. */
186 enqueue_putllc(lscsa_ea); /* Step 12. */
187 spill_regs_to_mem(lscsa_ea); /* Step 13. */
188 enqueue_sync(lscsa_ea); /* Step 14. */
189 set_tag_update(); /* Step 15. */
190 read_tag_status(); /* Step 16. */
191 read_llar_status(); /* Step 17. */
192 save_complete(); /* Step 18. */
193
194 return 0;
195}
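
Both the save and restore programs drive the MFC with raw opcode values (0x20, 0x24, 0xCC and so on) written to MFC_Cmd. Gathered into one enum for reference, with values copied verbatim from the magic numbers above and in spu_restore.c/spu_utils.h:

	/* MFC command opcodes used by the SPU-side switch code. */
	enum mfc_cmd_opcode {
		MFC_PUT_CMD    = 0x20, /* LS -> EA, single transfer (Save step 13)   */
		MFC_PUTL_CMD   = 0x24, /* LS -> EA, list transfer   (Save step 7)    */
		MFC_GET_CMD    = 0x40, /* EA -> LS, single transfer (Restore fetch)  */
		MFC_GETL_CMD   = 0x44, /* EA -> LS, list transfer   (Restore step 4) */
		MFC_PUTLLC_CMD = 0xB4, /* conditional lock-line put (Save step 12)   */
		MFC_SYNC_CMD   = 0xCC, /* order all prior commands  (Save step 14)   */
	};
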
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S b/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S
new file mode 100644
index 00000000000..6659d6a66fa
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S
@@ -0,0 +1,102 @@
1/*
2 * crt0_s.S: Entry function for SPU-side context save.
3 *
4 * Copyright (C) 2005 IBM
5 *
6 * Entry function for SPU-side of the context save sequence.
7 * Saves all 128 GPRs, sets up an initial stack frame, then
8 * branches to 'main'.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <asm/spu_csa.h>
27
28.data
29.align 7
30.globl regs_spill
31regs_spill:
32.space SIZEOF_SPU_SPILL_REGS, 0x0
33
34.text
35.global _start
36_start:
37 /* SPU Context Save Step 1: Save the first 16 GPRs. */
38 stqa $0, regs_spill + 0
39 stqa $1, regs_spill + 16
40 stqa $2, regs_spill + 32
41 stqa $3, regs_spill + 48
42 stqa $4, regs_spill + 64
43 stqa $5, regs_spill + 80
44 stqa $6, regs_spill + 96
45 stqa $7, regs_spill + 112
46 stqa $8, regs_spill + 128
47 stqa $9, regs_spill + 144
48 stqa $10, regs_spill + 160
49 stqa $11, regs_spill + 176
50 stqa $12, regs_spill + 192
51 stqa $13, regs_spill + 208
52 stqa $14, regs_spill + 224
53 stqa $15, regs_spill + 240
54
55 /* SPU Context Save, Step 8: Save the remaining 112 GPRs. */
56 ila $3, regs_spill + 256
57save_regs:
58 lqr $4, save_reg_insts
59save_reg_loop:
60 ai $4, $4, 4
61 .balignl 16, 0x40200000
62save_reg_insts: /* must be quad-word aligned. */
63 stqd $16, 0($3)
64 stqd $17, 16($3)
65 stqd $18, 32($3)
66 stqd $19, 48($3)
67 andi $5, $4, 0x7F
68 stqr $4, save_reg_insts
69 ai $3, $3, 64
70 brnz $5, save_reg_loop
71
72 /* Initialize the stack pointer to point to 16368
73 * (16kb-16). The back chain pointer is initialized
74 * to NULL.
75 */
76 il $0, 0
77 il $SP, 16368
78 stqd $0, 0($SP)
79
80 /* Allocate a minimum stack frame for the called main.
81 * This is needed so that main has a place to save the
82 * link register when it calls another function.
83 */
84 stqd $SP, -160($SP)
85 ai $SP, $SP, -160
86
87 /* Call the program's main function. */
88 brsl $0, main
89
90 /* In this case main should not return; if it does
91 * there has been an error in the sequence. Execute
92 * stop-and-signal with code=0.
93 */
94.global exit
95.global _exit
96exit:
97_exit:
98 stop 0x0
99
100	/* Pad the size of this crt0.o to be a multiple of 16 bytes. */
101.balignl 16, 0x0
102
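
The stack setup in both crt0 files is deliberately conservative: SP starts at 16368 (one quadword below 16 KB), keeping the switch code entirely inside the low 16 KB of local store that the sequences handle separately from the DMA-listed upper 240 KB, and a 160-byte frame is carved out so main() has somewhere to spill its link register. The arithmetic, worked through; calling 160 bytes the minimum frame is taken from the comment above rather than an ABI citation:

	/* Worked numbers behind the crt0 stack setup. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int low_ls = 16 * 1024;   /* region the switch code stays in */
		unsigned int sp     = low_ls - 16; /* 16368: one quadword down */
		unsigned int frame  = 160;         /* minimal frame for main() */

		printf("initial SP:  %u\n", sp);          /* 16368 */
		printf("after frame: %u\n", sp - frame);  /* 16208 */
		return 0;
	}
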
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
new file mode 100644
index 00000000000..39e54003f1d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
@@ -0,0 +1,191 @@
1/*
2 * spu_save_dump.h: Copyright (C) 2005 IBM.
3 * Hex-dump auto generated from spu_save.c.
4 * Do not edit!
5 */
6static unsigned int spu_save_code[] __page_aligned = {
70x20805000, 0x20805201, 0x20805402, 0x20805603,
80x20805804, 0x20805a05, 0x20805c06, 0x20805e07,
90x20806008, 0x20806209, 0x2080640a, 0x2080660b,
100x2080680c, 0x20806a0d, 0x20806c0e, 0x20806e0f,
110x4201c003, 0x33800184, 0x1c010204, 0x40200000,
120x24000190, 0x24004191, 0x24008192, 0x2400c193,
130x141fc205, 0x23fffd84, 0x1c100183, 0x217ffb85,
140x40800000, 0x409ff801, 0x24000080, 0x24fd8081,
150x1cd80081, 0x33000180, 0x00000000, 0x00000000,
160x01a00182, 0x3ec00083, 0xb1c38103, 0x01a00204,
170x3ec10082, 0x4201400d, 0xb1c38202, 0x01a00583,
180x34218682, 0x3ed80684, 0xb0408184, 0x24218682,
190x01a00603, 0x00200000, 0x34214682, 0x3ed40684,
200xb0408184, 0x40800003, 0x24214682, 0x21a00083,
210x40800082, 0x21a00b02, 0x4020007f, 0x1000251e,
220x40a80002, 0x32800008, 0x4205c00c, 0x00200000,
230x40a0000b, 0x3f82070f, 0x4080020a, 0x40800709,
240x3fe3078f, 0x3fbf0783, 0x3f200183, 0x3fbe0183,
250x3fe30187, 0x18008387, 0x4205c002, 0x3ac30404,
260x1cffc489, 0x00200000, 0x18008403, 0x38830402,
270x4cffc486, 0x3ac28185, 0xb0408584, 0x28830402,
280x1c020408, 0x38828182, 0xb0408385, 0x1802c387,
290x28828182, 0x217ff886, 0x04000582, 0x32800007,
300x21a00802, 0x3fbf0705, 0x3f200285, 0x3fbe0285,
310x3fe30285, 0x21a00885, 0x04000603, 0x21a00903,
320x40803c02, 0x21a00982, 0x04000386, 0x21a00a06,
330x40801202, 0x21a00a82, 0x73000003, 0x24200683,
340x01a00404, 0x00200000, 0x34204682, 0x3ec40683,
350xb0408203, 0x24204682, 0x01a00783, 0x00200000,
360x3421c682, 0x3edc0684, 0xb0408184, 0x2421c682,
370x21a00806, 0x21a00885, 0x3fbf0784, 0x3f200204,
380x3fbe0204, 0x3fe30204, 0x21a00904, 0x40804002,
390x21a00982, 0x21a00a06, 0x40805a02, 0x21a00a82,
400x04000683, 0x21a00803, 0x21a00885, 0x21a00904,
410x40848002, 0x21a00982, 0x21a00a06, 0x40801002,
420x21a00a82, 0x21a00a06, 0x40806602, 0x00200000,
430x35800009, 0x21a00a82, 0x40800083, 0x21a00b83,
440x01a00c02, 0x01a00d83, 0x00003ffb, 0x40800003,
450x4020007f, 0x35000000, 0x00000000, 0x00000000,
460x00000000, 0x00000000, 0x00000000, 0x00000000,
470x00000000, 0x00000000, 0x00000000, 0x00000000,
480x00000000, 0x00000000, 0x00000000, 0x00000000,
490x00000000, 0x00000000, 0x00000000, 0x00000000,
500x00000000, 0x00000000, 0x00000000, 0x00000000,
510x00000000, 0x00000000, 0x00000000, 0x00000000,
520x00000000, 0x00000000, 0x00000000, 0x00000000,
530x00000000, 0x00000000, 0x00000000, 0x00000000,
540x00000000, 0x00000000, 0x00000000, 0x00000000,
550x00000000, 0x00000000, 0x00000000, 0x00000000,
560x00000000, 0x00000000, 0x00000000, 0x00000000,
570x00000000, 0x00000000, 0x00000000, 0x00000000,
580x00000000, 0x00000000, 0x00000000, 0x00000000,
590x00000000, 0x00000000, 0x00000000, 0x00000000,
600x00000000, 0x00000000, 0x00000000, 0x00000000,
610x00000000, 0x00000000, 0x00000000, 0x00000000,
620x00000000, 0x00000000, 0x00000000, 0x00000000,
630x00000000, 0x00000000, 0x00000000, 0x00000000,
640x00000000, 0x00000000, 0x00000000, 0x00000000,
650x00000000, 0x00000000, 0x00000000, 0x00000000,
660x00000000, 0x00000000, 0x00000000, 0x00000000,
670x00000000, 0x00000000, 0x00000000, 0x00000000,
680x00000000, 0x00000000, 0x00000000, 0x00000000,
690x00000000, 0x00000000, 0x00000000, 0x00000000,
700x00000000, 0x00000000, 0x00000000, 0x00000000,
710x00000000, 0x00000000, 0x00000000, 0x00000000,
720x00000000, 0x00000000, 0x00000000, 0x00000000,
730x00000000, 0x00000000, 0x00000000, 0x00000000,
740x00000000, 0x00000000, 0x00000000, 0x00000000,
750x00000000, 0x00000000, 0x00000000, 0x00000000,
760x00000000, 0x00000000, 0x00000000, 0x00000000,
770x00000000, 0x00000000, 0x00000000, 0x00000000,
780x00000000, 0x00000000, 0x00000000, 0x00000000,
790x00000000, 0x00000000, 0x00000000, 0x00000000,
800x00000000, 0x00000000, 0x00000000, 0x00000000,
810x00000000, 0x00000000, 0x00000000, 0x00000000,
820x00000000, 0x00000000, 0x00000000, 0x00000000,
830x00000000, 0x00000000, 0x00000000, 0x00000000,
840x00000000, 0x00000000, 0x00000000, 0x00000000,
850x00000000, 0x00000000, 0x00000000, 0x00000000,
860x00000000, 0x00000000, 0x00000000, 0x00000000,
870x00000000, 0x00000000, 0x00000000, 0x00000000,
880x00000000, 0x00000000, 0x00000000, 0x00000000,
890x00000000, 0x00000000, 0x00000000, 0x00000000,
900x00000000, 0x00000000, 0x00000000, 0x00000000,
910x00000000, 0x00000000, 0x00000000, 0x00000000,
920x00000000, 0x00000000, 0x00000000, 0x00000000,
930x00000000, 0x00000000, 0x00000000, 0x00000000,
940x00000000, 0x00000000, 0x00000000, 0x00000000,
950x00000000, 0x00000000, 0x00000000, 0x00000000,
960x00000000, 0x00000000, 0x00000000, 0x00000000,
970x00000000, 0x00000000, 0x00000000, 0x00000000,
980x00000000, 0x00000000, 0x00000000, 0x00000000,
990x00000000, 0x00000000, 0x00000000, 0x00000000,
1000x00000000, 0x00000000, 0x00000000, 0x00000000,
1010x00000000, 0x00000000, 0x00000000, 0x00000000,
1020x00000000, 0x00000000, 0x00000000, 0x00000000,
1030x00000000, 0x00000000, 0x00000000, 0x00000000,
1040x00000000, 0x00000000, 0x00000000, 0x00000000,
1050x00000000, 0x00000000, 0x00000000, 0x00000000,
1060x00000000, 0x00000000, 0x00000000, 0x00000000,
1070x00000000, 0x00000000, 0x00000000, 0x00000000,
1080x00000000, 0x00000000, 0x00000000, 0x00000000,
1090x00000000, 0x00000000, 0x00000000, 0x00000000,
1100x00000000, 0x00000000, 0x00000000, 0x00000000,
1110x00000000, 0x00000000, 0x00000000, 0x00000000,
1120x00000000, 0x00000000, 0x00000000, 0x00000000,
1130x00000000, 0x00000000, 0x00000000, 0x00000000,
1140x00000000, 0x00000000, 0x00000000, 0x00000000,
1150x00000000, 0x00000000, 0x00000000, 0x00000000,
1160x00000000, 0x00000000, 0x00000000, 0x00000000,
1170x00000000, 0x00000000, 0x00000000, 0x00000000,
1180x00000000, 0x00000000, 0x00000000, 0x00000000,
1190x00000000, 0x00000000, 0x00000000, 0x00000000,
1200x00000000, 0x00000000, 0x00000000, 0x00000000,
1210x00000000, 0x00000000, 0x00000000, 0x00000000,
1220x00000000, 0x00000000, 0x00000000, 0x00000000,
1230x00000000, 0x00000000, 0x00000000, 0x00000000,
1240x00000000, 0x00000000, 0x00000000, 0x00000000,
1250x00000000, 0x00000000, 0x00000000, 0x00000000,
1260x00000000, 0x00000000, 0x00000000, 0x00000000,
1270x00000000, 0x00000000, 0x00000000, 0x00000000,
1280x00000000, 0x00000000, 0x00000000, 0x00000000,
1290x00000000, 0x00000000, 0x00000000, 0x00000000,
1300x00000000, 0x00000000, 0x00000000, 0x00000000,
1310x00000000, 0x00000000, 0x00000000, 0x00000000,
1320x00000000, 0x00000000, 0x00000000, 0x00000000,
1330x00000000, 0x00000000, 0x00000000, 0x00000000,
1340x00000000, 0x00000000, 0x00000000, 0x00000000,
1350x00000000, 0x00000000, 0x00000000, 0x00000000,
1360x00000000, 0x00000000, 0x00000000, 0x00000000,
1370x00000000, 0x00000000, 0x00000000, 0x00000000,
1380x00000000, 0x00000000, 0x00000000, 0x00000000,
1390x00000000, 0x00000000, 0x00000000, 0x00000000,
1400x00000000, 0x00000000, 0x00000000, 0x00000000,
1410x00000000, 0x00000000, 0x00000000, 0x00000000,
1420x00000000, 0x00000000, 0x00000000, 0x00000000,
1430x00000000, 0x00000000, 0x00000000, 0x00000000,
1440x00000000, 0x00000000, 0x00000000, 0x00000000,
1450x00000000, 0x00000000, 0x00000000, 0x00000000,
1460x00000000, 0x00000000, 0x00000000, 0x00000000,
1470x00000000, 0x00000000, 0x00000000, 0x00000000,
1480x00000000, 0x00000000, 0x00000000, 0x00000000,
1490x00000000, 0x00000000, 0x00000000, 0x00000000,
1500x00000000, 0x00000000, 0x00000000, 0x00000000,
1510x00000000, 0x00000000, 0x00000000, 0x00000000,
1520x00000000, 0x00000000, 0x00000000, 0x00000000,
1530x00000000, 0x00000000, 0x00000000, 0x00000000,
1540x00000000, 0x00000000, 0x00000000, 0x00000000,
1550x00000000, 0x00000000, 0x00000000, 0x00000000,
1560x00000000, 0x00000000, 0x00000000, 0x00000000,
1570x00000000, 0x00000000, 0x00000000, 0x00000000,
1580x00000000, 0x00000000, 0x00000000, 0x00000000,
1590x00000000, 0x00000000, 0x00000000, 0x00000000,
1600x00000000, 0x00000000, 0x00000000, 0x00000000,
1610x00000000, 0x00000000, 0x00000000, 0x00000000,
1620x00000000, 0x00000000, 0x00000000, 0x00000000,
1630x00000000, 0x00000000, 0x00000000, 0x00000000,
1640x00000000, 0x00000000, 0x00000000, 0x00000000,
1650x00000000, 0x00000000, 0x00000000, 0x00000000,
1660x00000000, 0x00000000, 0x00000000, 0x00000000,
1670x00000000, 0x00000000, 0x00000000, 0x00000000,
1680x00000000, 0x00000000, 0x00000000, 0x00000000,
1690x00000000, 0x00000000, 0x00000000, 0x00000000,
1700x00000000, 0x00000000, 0x00000000, 0x00000000,
1710x00000000, 0x00000000, 0x00000000, 0x00000000,
1720x00000000, 0x00000000, 0x00000000, 0x00000000,
1730x00000000, 0x00000000, 0x00000000, 0x00000000,
1740x00000000, 0x00000000, 0x00000000, 0x00000000,
1750x00000000, 0x00000000, 0x00000000, 0x00000000,
1760x00000000, 0x00000000, 0x00000000, 0x00000000,
1770x00000000, 0x00000000, 0x00000000, 0x00000000,
1780x00000000, 0x00000000, 0x00000000, 0x00000000,
1790x00000000, 0x00000000, 0x00000000, 0x00000000,
1800x00000000, 0x00000000, 0x00000000, 0x00000000,
1810x00000000, 0x00000000, 0x00000000, 0x00000000,
1820x00000000, 0x00000000, 0x00000000, 0x00000000,
1830x00000000, 0x00000000, 0x00000000, 0x00000000,
1840x00000000, 0x00000000, 0x00000000, 0x00000000,
1850x00000000, 0x00000000, 0x00000000, 0x00000000,
1860x00000000, 0x00000000, 0x00000000, 0x00000000,
1870x00000000, 0x00000000, 0x00000000, 0x00000000,
1880x00000000, 0x00000000, 0x00000000, 0x00000000,
1890x00000000, 0x00000000, 0x00000000, 0x00000000,
1900x00000000, 0x00000000, 0x00000000, 0x00000000,
191};
diff --git a/arch/powerpc/platforms/cell/spufs/spu_utils.h b/arch/powerpc/platforms/cell/spufs/spu_utils.h
new file mode 100644
index 00000000000..58359feb6c9
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_utils.h
@@ -0,0 +1,160 @@
1/*
2 * utils.h: Utilities for SPU-side of the context switch operation.
3 *
4 * (C) Copyright IBM 2005
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef _SPU_CONTEXT_UTILS_H_
22#define _SPU_CONTEXT_UTILS_H_
23
24/*
25 * 64-bit safe EA.
26 */
27typedef union {
28 unsigned long long ull;
29 unsigned int ui[2];
30} addr64;
31
32/*
33 * 128-bit register template.
34 */
35typedef union {
36 unsigned int slot[4];
37 vector unsigned int v;
38} spu_reg128v;
39
40/*
41 * DMA list structure.
42 */
43struct dma_list_elem {
44 unsigned int size;
45 unsigned int ea_low;
46};
47
48/*
49 * Declare storage for 8-byte aligned DMA list.
50 */
51struct dma_list_elem dma_list[15] __attribute__ ((aligned(8)));
52
53/*
54 * External definition for storage
55 * declared in crt0.
56 */
57extern spu_reg128v regs_spill[NR_SPU_SPILL_REGS];
58
59/*
60 * Compute LSCSA byte offset for a given field.
61 */
62static struct spu_lscsa *dummy = (struct spu_lscsa *)0;
63#define LSCSA_BYTE_OFFSET(_field) \
64 ((char *)(&(dummy->_field)) - (char *)(&(dummy->gprs[0].slot[0])))
65#define LSCSA_QW_OFFSET(_field) (LSCSA_BYTE_OFFSET(_field) >> 4)
66
67static inline void set_event_mask(void)
68{
69 unsigned int event_mask = 0;
70
71 /* Save, Step 4:
72 * Restore, Step 1:
73 * Set the SPU_WrEventMask channel to zero to mask
74 * all events.
75 */
76 spu_writech(SPU_WrEventMask, event_mask);
77}
78
79static inline void set_tag_mask(void)
80{
81 unsigned int tag_mask = 1;
82
83 /* Save, Step 5:
84 * Restore, Step 2:
85 * Set the MFC_WrTagMask channel to '01' to unmask
86 * only tag group 0.
87 */
88 spu_writech(MFC_WrTagMask, tag_mask);
89}
90
91static inline void build_dma_list(addr64 lscsa_ea)
92{
93 unsigned int ea_low;
94 int i;
95
96 /* Save, Step 6:
97 * Restore, Step 3:
98 * Update the effective address for the CSA in the
99 * pre-canned DMA-list in local storage.
100 */
101 ea_low = lscsa_ea.ui[1];
102 ea_low += LSCSA_BYTE_OFFSET(ls[16384]);
103
104 for (i = 0; i < 15; i++, ea_low += 16384) {
105 dma_list[i].size = 16384;
106 dma_list[i].ea_low = ea_low;
107 }
108}
109
110static inline void enqueue_putllc(addr64 lscsa_ea)
111{
112 unsigned int ls = 0;
113 unsigned int size = 128;
114 unsigned int tag_id = 0;
115 unsigned int cmd = 0xB4; /* PUTLLC */
116
117 /* Save, Step 12:
118 * Restore, Step 7:
119 * Send a PUTLLC (tag 0) command to the MFC using
120 * an effective address in the CSA in order to
121 * remove any possible lock-line reservation.
122 */
123 spu_writech(MFC_LSA, ls);
124 spu_writech(MFC_EAH, lscsa_ea.ui[0]);
125 spu_writech(MFC_EAL, lscsa_ea.ui[1]);
126 spu_writech(MFC_Size, size);
127 spu_writech(MFC_TagID, tag_id);
128 spu_writech(MFC_Cmd, cmd);
129}
130
131static inline void set_tag_update(void)
132{
133 unsigned int update_any = 1;
134
135 /* Save, Step 15:
136 * Restore, Step 8:
137 * Write the MFC_WrTagUpdate channel with '01' (update any).
138 */
139 spu_writech(MFC_WrTagUpdate, update_any);
140}
141
142static inline void read_tag_status(void)
143{
144 /* Save, Step 16:
145 * Restore, Step 9:
146 * Read the MFC_RdTagStat channel data.
147 */
148 spu_readch(MFC_RdTagStat);
149}
150
151static inline void read_llar_status(void)
152{
153 /* Save, Step 17:
154 * Restore, Step 10:
155 * Read the MFC_RdAtomicStat channel data.
156 */
157 spu_readch(MFC_RdAtomicStat);
158}
159
160#endif /* _SPU_CONTEXT_UTILS_H_ */
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
new file mode 100644
index 00000000000..db2601f0abd
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -0,0 +1,163 @@
1/*
2 * SPU file system
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22#ifndef SPUFS_H
23#define SPUFS_H
24
25#include <linux/kref.h>
26#include <linux/rwsem.h>
27#include <linux/spinlock.h>
28#include <linux/fs.h>
29
30#include <asm/spu.h>
31#include <asm/spu_csa.h>
32
33/* The magic number for our file system */
34enum {
35 SPUFS_MAGIC = 0x23c9b64e,
36};
37
38struct spu_context_ops;
39
40#define SPU_CONTEXT_PREEMPT 0UL
41
42struct spu_context {
43 struct spu *spu; /* pointer to a physical SPU */
44 struct spu_state csa; /* SPU context save area. */
45 spinlock_t mmio_lock; /* protects mmio access */
46 struct address_space *local_store;/* local store backing store */
47
48 enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
49 struct rw_semaphore state_sema;
50 struct semaphore run_sema;
51
52 struct mm_struct *owner;
53
54 struct kref kref;
55 wait_queue_head_t ibox_wq;
56 wait_queue_head_t wbox_wq;
57 wait_queue_head_t stop_wq;
58 struct fasync_struct *ibox_fasync;
59 struct fasync_struct *wbox_fasync;
60 struct spu_context_ops *ops;
61 struct work_struct reap_work;
62 u64 flags;
63};
64
65/* SPU context query/set operations. */
66struct spu_context_ops {
67 int (*mbox_read)(struct spu_context *ctx, u32 *data);
68 u32 (*mbox_stat_read)(struct spu_context *ctx);
69 unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
70 unsigned int events);
71 int (*ibox_read)(struct spu_context *ctx, u32 *data);
72 int (*wbox_write)(struct spu_context *ctx, u32 data);
73 u32 (*signal1_read)(struct spu_context *ctx);
74 void (*signal1_write)(struct spu_context *ctx, u32 data);
75 u32 (*signal2_read)(struct spu_context *ctx);
76 void (*signal2_write)(struct spu_context *ctx, u32 data);
77 void (*signal1_type_set)(struct spu_context *ctx, u64 val);
78 u64 (*signal1_type_get)(struct spu_context *ctx);
79 void (*signal2_type_set)(struct spu_context *ctx, u64 val);
80 u64 (*signal2_type_get)(struct spu_context *ctx);
81 u32 (*npc_read)(struct spu_context *ctx);
82 void (*npc_write)(struct spu_context *ctx, u32 data);
83 u32 (*status_read)(struct spu_context *ctx);
84 char *(*get_ls)(struct spu_context *ctx);
85 void (*runcntl_write)(struct spu_context *ctx, u32 data);
86 void (*runcntl_stop)(struct spu_context *ctx);
87};
88
89extern struct spu_context_ops spu_hw_ops;
90extern struct spu_context_ops spu_backing_ops;
91
92struct spufs_inode_info {
93 struct spu_context *i_ctx;
94 struct inode vfs_inode;
95};
96#define SPUFS_I(inode) \
97 container_of(inode, struct spufs_inode_info, vfs_inode)
98
99extern struct tree_descr spufs_dir_contents[];
100
101/* system call implementation */
102long spufs_run_spu(struct file *file,
103 struct spu_context *ctx, u32 *npc, u32 *status);
104long spufs_create_thread(struct nameidata *nd,
105 unsigned int flags, mode_t mode);
106extern struct file_operations spufs_context_fops;
107
108/* context management */
109struct spu_context *alloc_spu_context(struct address_space *local_store);
110void destroy_spu_context(struct kref *kref);
111struct spu_context *get_spu_context(struct spu_context *ctx);
112int put_spu_context(struct spu_context *ctx);
113void spu_unmap_mappings(struct spu_context *ctx);
114
115void spu_forget(struct spu_context *ctx);
116void spu_acquire(struct spu_context *ctx);
117void spu_release(struct spu_context *ctx);
118int spu_acquire_runnable(struct spu_context *ctx);
119void spu_acquire_saved(struct spu_context *ctx);
120
121int spu_activate(struct spu_context *ctx, u64 flags);
122void spu_deactivate(struct spu_context *ctx);
123void spu_yield(struct spu_context *ctx);
124int __init spu_sched_init(void);
125void __exit spu_sched_exit(void);
126
127/*
128 * spufs_wait
129 * Same as wait_event_interruptible(), except that here
130 * we need to call spu_release(ctx) before sleeping, and
131 * then spu_acquire(ctx) when awoken.
132 */
133
134#define spufs_wait(wq, condition) \
135({ \
136 int __ret = 0; \
137 DEFINE_WAIT(__wait); \
138 for (;;) { \
139 prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \
140 if (condition) \
141 break; \
142 if (!signal_pending(current)) { \
143 spu_release(ctx); \
144 schedule(); \
145 spu_acquire(ctx); \
146 continue; \
147 } \
148 __ret = -ERESTARTSYS; \
149 break; \
150 } \
151 finish_wait(&(wq), &__wait); \
152 __ret; \
153})
154
155size_t spu_wbox_write(struct spu_context *ctx, u32 data);
156size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
157
158/* irq callback funcs. */
159void spufs_ibox_callback(struct spu *spu);
160void spufs_wbox_callback(struct spu *spu);
161void spufs_stop_callback(struct spu *spu);
162
163#endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
new file mode 100644
index 00000000000..1061c12b2ed
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -0,0 +1,2180 @@
1/*
2 * spu_switch.c
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * Author: Mark Nutter <mnutter@us.ibm.com>
7 *
8 * Host-side part of SPU context switch sequence outlined in
9 * Synergistic Processor Element, Book IV.
10 *
11 * A fully preemptive switch of an SPE is very expensive in terms
12 * of time and system resources. SPE Book IV indicates that SPE
13 * allocation should follow a "serially reusable device" model,
14 * in which the SPE is assigned a task until it completes. When
15 * this is not possible, this sequence may be used to preemptively
16 * save, and then later (optionally) restore the context of a
17 * program executing on an SPE.
18 *
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2, or (at your option)
23 * any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35#include <linux/config.h>
36#include <linux/module.h>
37#include <linux/errno.h>
38#include <linux/sched.h>
39#include <linux/kernel.h>
40#include <linux/mm.h>
41#include <linux/vmalloc.h>
42#include <linux/smp.h>
43#include <linux/smp_lock.h>
44#include <linux/stddef.h>
45#include <linux/unistd.h>
46
47#include <asm/io.h>
48#include <asm/spu.h>
49#include <asm/spu_csa.h>
50#include <asm/mmu_context.h>
51
52#include "spu_save_dump.h"
53#include "spu_restore_dump.h"
54
55#if 0
56#define POLL_WHILE_TRUE(_c) { \
57 do { \
58 } while (_c); \
59 }
60#else
61#define RELAX_SPIN_COUNT 1000
62#define POLL_WHILE_TRUE(_c) { \
63 do { \
64 int _i; \
65 for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
66 cpu_relax(); \
67 } \
68 if (unlikely(_c)) yield(); \
69 else break; \
70 } while (_c); \
71 }
72#endif /* debug */
73
74#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
75
76static inline void acquire_spu_lock(struct spu *spu)
77{
78 /* Save, Step 1:
79 * Restore, Step 1:
80 * Acquire SPU-specific mutual exclusion lock.
81 * TBD.
82 */
83}
84
85static inline void release_spu_lock(struct spu *spu)
86{
87 /* Restore, Step 76:
88 * Release SPU-specific mutual exclusion lock.
89 * TBD.
90 */
91}
92
93static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
94{
95 struct spu_problem __iomem *prob = spu->problem;
96 u32 isolate_state;
97
98 /* Save, Step 2:
99 * Save, Step 6:
100 * If any field of SPU_Status[E,L,IS] is '1', this
101 * SPU is in isolate state and cannot be context
102 * saved at this time.
103 */
104 isolate_state = SPU_STATUS_ISOLATED_STATE |
105 SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
106 return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
107}
108
109static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
110{
111 /* Save, Step 3:
112 * Restore, Step 2:
113 * Save INT_Mask_class0 in CSA.
114 * Write INT_MASK_class0 with value of 0.
115 * Save INT_Mask_class1 in CSA.
116 * Write INT_MASK_class1 with value of 0.
117 * Save INT_Mask_class2 in CSA.
118 * Write INT_MASK_class2 with value of 0.
119 */
120 spin_lock_irq(&spu->register_lock);
121 if (csa) {
122 csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
123 csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
124 csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
125 }
126 spu_int_mask_set(spu, 0, 0ul);
127 spu_int_mask_set(spu, 1, 0ul);
128 spu_int_mask_set(spu, 2, 0ul);
129 eieio();
130 spin_unlock_irq(&spu->register_lock);
131}
132
133static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
134{
135 /* Save, Step 4:
136 * Restore, Step 25.
137 * Set a software watchdog timer, which specifies the
138 * maximum allowable time for a context save sequence.
139 *
140 * For the present, this implementation will not set a global
141 * watchdog timer, as virtualization & variable system load
142 * may cause unpredictable execution times.
143 */
144}
145
146static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
147{
148 /* Save, Step 5:
149 * Restore, Step 3:
150 * Inhibit user-space access (if provided) to this
151 * SPU by unmapping the virtual pages assigned to
152 * the SPU memory-mapped I/O (MMIO) for problem
153 * state. TBD.
154 */
155}
156
157static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
158{
159 /* Save, Step 7:
160 * Restore, Step 5:
161 * Set a software context switch pending flag.
162 */
163 set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
164 mb();
165}
166
167static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
168{
169 struct spu_priv2 __iomem *priv2 = spu->priv2;
170
171 /* Save, Step 8:
172 * Read and save MFC_CNTL[Ss].
173 */
174 if (csa) {
175 csa->priv2.mfc_control_RW = in_be64(&priv2->mfc_control_RW) &
176 MFC_CNTL_SUSPEND_DMA_STATUS_MASK;
177 }
178}
179
180static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
181{
182 struct spu_problem __iomem *prob = spu->problem;
183
184 /* Save, Step 9:
185 * Save SPU_Runcntl in the CSA. This value contains
186 * the "Application Desired State".
187 */
188 csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
189}
190
191static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
192{
193 /* Save, Step 10:
194 * Save MFC_SR1 in the CSA.
195 */
196 csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
197}
198
199static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
200{
201 struct spu_problem __iomem *prob = spu->problem;
202
203 /* Save, Step 11:
204 * Read SPU_Status[R], and save to CSA.
205 */
206 if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
207 csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
208 } else {
209 u32 stopped;
210
211 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
212 eieio();
213 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
214 SPU_STATUS_RUNNING);
215 stopped =
216 SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
217 SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
218 if ((in_be32(&prob->spu_status_R) & stopped) == 0)
219 csa->prob.spu_status_R = SPU_STATUS_RUNNING;
220 else
221 csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
222 }
223}
224
225static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
226{
227 struct spu_priv2 __iomem *priv2 = spu->priv2;
228
229 /* Save, Step 12:
230 * Read MFC_CNTL[Ds]. Update saved copy of
231 * CSA.MFC_CNTL[Ds].
232 */
233 if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) {
234 csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
235 csa->suspend_time = get_cycles();
236 out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
237 eieio();
238 csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
239 eieio();
240 }
241}
242
243static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
244{
245 struct spu_priv2 __iomem *priv2 = spu->priv2;
246
247 /* Save, Step 13:
248 * Write MFC_CNTL[Dh] set to a '1' to halt
249 * the decrementer.
250 */
251 out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED);
252 eieio();
253}
254
255static inline void save_timebase(struct spu_state *csa, struct spu *spu)
256{
257 /* Save, Step 14:
258 * Read PPE Timebase High and Timebase Low registers
259 * and save in CSA. TBD.
260 */
261 csa->suspend_time = get_cycles();
262}
263
264static inline void remove_other_spu_access(struct spu_state *csa,
265 struct spu *spu)
266{
267 /* Save, Step 15:
268 * Remove other SPU access to this SPU by unmapping
269 * this SPU's pages from their address space. TBD.
270 */
271}
272
273static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
274{
275 struct spu_problem __iomem *prob = spu->problem;
276
277 /* Save, Step 16:
278 * Restore, Step 11.
279 * Write SPU_MSSync register. Poll SPU_MSSync[P]
280 * for a value of 0.
281 */
282 out_be64(&prob->spc_mssync_RW, 1UL);
283 POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
284}
285
286static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
287{
288 /* Save, Step 17:
289 * Restore, Step 12.
290 * Restore, Step 48.
291 * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
292 * Then issue a PPE sync instruction.
293 */
294 spu_tlb_invalidate(spu);
295 mb();
296}
297
298static inline void handle_pending_interrupts(struct spu_state *csa,
299 struct spu *spu)
300{
301 /* Save, Step 18:
302 * Handle any pending interrupts from this SPU
303 * here. This is OS or hypervisor specific. One
304 * option is to re-enable interrupts to handle any
305 * pending interrupts, with the interrupt handlers
306 * recognizing the software Context Switch Pending
307 * flag, to ensure the SPU execution or MFC command
308 * queue is not restarted. TBD.
309 */
310}
311
312static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
313{
314 struct spu_priv2 __iomem *priv2 = spu->priv2;
315 int i;
316
317 /* Save, Step 19:
318 * If MFC_Cntl[Se]=0 then save
319 * MFC command queues.
320 */
321 if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
322 for (i = 0; i < 8; i++) {
323 csa->priv2.puq[i].mfc_cq_data0_RW =
324 in_be64(&priv2->puq[i].mfc_cq_data0_RW);
325 csa->priv2.puq[i].mfc_cq_data1_RW =
326 in_be64(&priv2->puq[i].mfc_cq_data1_RW);
327 csa->priv2.puq[i].mfc_cq_data2_RW =
328 in_be64(&priv2->puq[i].mfc_cq_data2_RW);
329 csa->priv2.puq[i].mfc_cq_data3_RW =
330 in_be64(&priv2->puq[i].mfc_cq_data3_RW);
331 }
332 for (i = 0; i < 16; i++) {
333 csa->priv2.spuq[i].mfc_cq_data0_RW =
334 in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
335 csa->priv2.spuq[i].mfc_cq_data1_RW =
336 in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
337 csa->priv2.spuq[i].mfc_cq_data2_RW =
338 in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
339 csa->priv2.spuq[i].mfc_cq_data3_RW =
340 in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
341 }
342 }
343}
344
345static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
346{
347 struct spu_problem __iomem *prob = spu->problem;
348
349 /* Save, Step 20:
350 * Save the PPU_QueryMask register
351 * in the CSA.
352 */
353 csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
354}
355
356static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
357{
358 struct spu_problem __iomem *prob = spu->problem;
359
360 /* Save, Step 21:
361 * Save the PPU_QueryType register
362 * in the CSA.
363 */
364 csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
365}
366
367static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
368{
369 struct spu_priv2 __iomem *priv2 = spu->priv2;
370
371 /* Save, Step 22:
372 * Save the MFC_CSR_TSQ register
373 * in the LSCSA.
374 */
375 csa->priv2.spu_tag_status_query_RW =
376 in_be64(&priv2->spu_tag_status_query_RW);
377}
378
379static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
380{
381 struct spu_priv2 __iomem *priv2 = spu->priv2;
382
383 /* Save, Step 23:
384 * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
385 * registers in the CSA.
386 */
387 csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
388 csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
389}
390
391static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
392{
393 struct spu_priv2 __iomem *priv2 = spu->priv2;
394
395 /* Save, Step 24:
396 * Save the MFC_CSR_ATO register in
397 * the CSA.
398 */
399 csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
400}
401
402static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
403{
404 /* Save, Step 25:
405 * Save the MFC_TCLASS_ID register in
406 * the CSA.
407 */
408 csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
409}
410
411static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
412{
413 /* Save, Step 26:
414 * Restore, Step 23.
415 * Write the MFC_TCLASS_ID register with
416 * the value 0x10000000.
417 */
418 spu_mfc_tclass_id_set(spu, 0x10000000);
419 eieio();
420}
421
422static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
423{
424 struct spu_priv2 __iomem *priv2 = spu->priv2;
425
426 /* Save, Step 27:
427 * Restore, Step 14.
428 * Write MFC_CNTL[Pc]=1 (purge queue).
429 */
430 out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
431 eieio();
432}
433
434static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
435{
436 struct spu_priv2 __iomem *priv2 = spu->priv2;
437
438 /* Save, Step 28:
439 * Poll MFC_CNTL[Ps] until value '11' is read
440 * (purge complete).
441 */
442 POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
443 MFC_CNTL_PURGE_DMA_COMPLETE);
444}
445
446static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
447{
448 struct spu_priv2 __iomem *priv2 = spu->priv2;
449 int i;
450
451 /* Save, Step 29:
452 * If MFC_SR1[R]='1', save SLBs in CSA.
453 */
454 if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
455 csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
456 for (i = 0; i < 8; i++) {
457 out_be64(&priv2->slb_index_W, i);
458 eieio();
459 csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
460 csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
461 eieio();
462 }
463 }
464}
465
466static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
467{
468 /* Save, Step 30:
469 * Restore, Step 18:
470 * Write MFC_SR1 with MFC_SR1[D=0,S=1] and
471 * MFC_SR1[TL,R,Pr,T] set correctly for the
472 * OS specific environment.
473 *
474 * Implementation note: The SPU-side code
475 * for save/restore is privileged, so the
476 * MFC_SR1[Pr] bit is not set.
477 *
478 */
479 spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
480 MFC_STATE1_RELOCATE_MASK |
481 MFC_STATE1_BUS_TLBIE_MASK));
482}
483
484static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
485{
486 struct spu_problem __iomem *prob = spu->problem;
487
488 /* Save, Step 31:
489 * Save SPU_NPC in the CSA.
490 */
491 csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
492}
493
494static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
495{
496 struct spu_priv2 __iomem *priv2 = spu->priv2;
497
498 /* Save, Step 32:
499 * Save SPU_PrivCntl in the CSA.
500 */
501 csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
502}
503
504static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
505{
506 struct spu_priv2 __iomem *priv2 = spu->priv2;
507
508 /* Save, Step 33:
509 * Restore, Step 16:
510 * Write SPU_PrivCntl[S,Le,A] fields reset to 0.
511 */
512 out_be64(&priv2->spu_privcntl_RW, 0UL);
513 eieio();
514}
515
516static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
517{
518 struct spu_priv2 __iomem *priv2 = spu->priv2;
519
520 /* Save, Step 34:
521 * Save SPU_LSLR in the CSA.
522 */
523 csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
524}
525
526static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
527{
528 struct spu_priv2 __iomem *priv2 = spu->priv2;
529
530 /* Save, Step 35:
531 * Restore, Step 17.
532 * Reset SPU_LSLR.
533 */
534 out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
535 eieio();
536}
537
538static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
539{
540 struct spu_priv2 __iomem *priv2 = spu->priv2;
541
542 /* Save, Step 36:
543 * Save SPU_Cfg in the CSA.
544 */
545 csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
546}
547
548static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
549{
550 /* Save, Step 37:
551 * Save PM_Trace_Tag_Wait_Mask in the CSA.
552 * Not performed by this implementation.
553 */
554}
555
556static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
557{
558 /* Save, Step 38:
559 * Save RA_GROUP_ID register and the
560 * RA_ENABLE register in the CSA.
561 */
562 csa->priv1.resource_allocation_groupID_RW =
563 spu_resource_allocation_groupID_get(spu);
564 csa->priv1.resource_allocation_enable_RW =
565 spu_resource_allocation_enable_get(spu);
566}
567
568static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
569{
570 struct spu_problem __iomem *prob = spu->problem;
571
572 /* Save, Step 39:
573 * Save MB_Stat register in the CSA.
574 */
575 csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
576}
577
578static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
579{
580 struct spu_problem __iomem *prob = spu->problem;
581
582 /* Save, Step 40:
583 * Save the PPU_MB register in the CSA.
584 */
585 csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
586}
587
588static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
589{
590 struct spu_priv2 __iomem *priv2 = spu->priv2;
591
592 /* Save, Step 41:
593 * Save the PPUINT_MB register in the CSA.
594 */
595 csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
596}
597
598static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
599{
600 struct spu_priv2 __iomem *priv2 = spu->priv2;
601 u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
602 int i;
603
604 /* Save, Step 42:
605 * Save the following CH: [0,1,3,4,24,25,27]
606 */
607 for (i = 0; i < 7; i++) {
608 idx = ch_indices[i];
609 out_be64(&priv2->spu_chnlcntptr_RW, idx);
610 eieio();
611 csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
612 csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
613 out_be64(&priv2->spu_chnldata_RW, 0UL);
614 out_be64(&priv2->spu_chnlcnt_RW, 0UL);
615 eieio();
616 }
617}
618
619static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
620{
621 struct spu_priv2 __iomem *priv2 = spu->priv2;
622 int i;
623
624 /* Save, Step 43:
625 * Save SPU Read Mailbox Channel.
626 */
627 out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
628 eieio();
629 csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
630 for (i = 0; i < 4; i++) {
631 csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
632 }
633 out_be64(&priv2->spu_chnlcnt_RW, 0UL);
634 eieio();
635}
636
637static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
638{
639 struct spu_priv2 __iomem *priv2 = spu->priv2;
640
641 /* Save, Step 44:
642 * Save MFC_CMD Channel.
643 */
644 out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
645 eieio();
646 csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
647 eieio();
648}
649
650static inline void reset_ch(struct spu_state *csa, struct spu *spu)
651{
652 struct spu_priv2 __iomem *priv2 = spu->priv2;
653 u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
654 u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
655 u64 idx;
656 int i;
657
658 /* Save, Step 45:
659 * Reset the following CH: [21, 23, 28, 30]
660 */
661 for (i = 0; i < 4; i++) {
662 idx = ch_indices[i];
663 out_be64(&priv2->spu_chnlcntptr_RW, idx);
664 eieio();
665 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
666 eieio();
667 }
668}
669
670static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
671{
672 struct spu_priv2 __iomem *priv2 = spu->priv2;
673
674 /* Save, Step 46:
675 * Restore, Step 25.
676 * Write MFC_CNTL[Sc]=0 (resume queue processing).
677 */
678 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
679}
680
681static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
682{
683 struct spu_priv2 __iomem *priv2 = spu->priv2;
684
685 /* Save, Step 45:
686 * Restore, Step 19:
687 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
688 */
689 if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
690 out_be64(&priv2->slb_invalidate_all_W, 0UL);
691 eieio();
692 }
693}
694
695static inline void get_kernel_slb(u64 ea, u64 slb[2])
696{
697 slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
698 slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
699
700 /* Large pages are used for kernel text/data, but not vmalloc. */
701 if (cpu_has_feature(CPU_FTR_16M_PAGE)
702 && REGION_ID(ea) == KERNEL_REGION_ID)
703 slb[0] |= SLB_VSID_L;
704}
705
706static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
707{
708 struct spu_priv2 __iomem *priv2 = spu->priv2;
709
710 out_be64(&priv2->slb_index_W, slbe);
711 eieio();
712 out_be64(&priv2->slb_vsid_RW, slb[0]);
713 out_be64(&priv2->slb_esid_RW, slb[1]);
714 eieio();
715}
716
717static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
718{
719 u64 code_slb[2];
720 u64 lscsa_slb[2];
721
722 /* Save, Step 47:
723 * Restore, Step 30.
724 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
725 * register, then initialize SLB_VSID and SLB_ESID
726 * to provide access to SPU context save code and
727 * LSCSA.
728 *
729 * This implementation places both the context
730 * switch code and LSCSA in kernel address space.
731 *
732 * Further, this implementation assumes that
733 * MFC_SR1[R]=1 (in other words, that address
734 * translation is desired by the OS environment).
735 */
736 invalidate_slbs(csa, spu);
737 get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
738 get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
739 load_mfc_slb(spu, code_slb, 0);
740 if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
741 load_mfc_slb(spu, lscsa_slb, 1);
742}
743
744static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
745{
746 /* Save, Step 48:
747 * Restore, Step 23.
748 * Change the software context switch pending flag
749 * to context switch active.
750 */
751 set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
752 clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
753 mb();
754}
755
756static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
757{
758 unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
759 CLASS1_ENABLE_STORAGE_FAULT_INTR;
760
761 /* Save, Step 49:
762 * Restore, Step 22:
763 * Reset and then enable interrupts, as
764 * needed by OS.
765 *
766 * This implementation enables only class1
767 * (translation) interrupts.
768 */
769 spin_lock_irq(&spu->register_lock);
770 spu_int_stat_clear(spu, 0, ~0ul);
771 spu_int_stat_clear(spu, 1, ~0ul);
772 spu_int_stat_clear(spu, 2, ~0ul);
773 spu_int_mask_set(spu, 0, 0ul);
774 spu_int_mask_set(spu, 1, class1_mask);
775 spu_int_mask_set(spu, 2, 0ul);
776 spin_unlock_irq(&spu->register_lock);
777}
778
779static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
780 unsigned int ls_offset, unsigned int size,
781 unsigned int tag, unsigned int rclass,
782 unsigned int cmd)
783{
784 struct spu_problem __iomem *prob = spu->problem;
785 union mfc_tag_size_class_cmd command;
786 unsigned int transfer_size;
787 volatile unsigned int status = 0x0;
788
789 while (size > 0) {
790 transfer_size =
791 (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
792 command.u.mfc_size = transfer_size;
793 command.u.mfc_tag = tag;
794 command.u.mfc_rclassid = rclass;
795 command.u.mfc_cmd = cmd;
796 do {
797 out_be32(&prob->mfc_lsa_W, ls_offset);
798 out_be64(&prob->mfc_ea_W, ea);
799 out_be64(&prob->mfc_union_W.all64, command.all64);
800 status =
801 in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
802 if (unlikely(status & 0x2)) {
803 cpu_relax();
804 }
805 } while (status & 0x3);
806 size -= transfer_size;
807 ea += transfer_size;
808 ls_offset += transfer_size;
809 }
810 return 0;
811}
812
813static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
814{
815 unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
816 unsigned int ls_offset = 0x0;
817 unsigned int size = 16384;
818 unsigned int tag = 0;
819 unsigned int rclass = 0;
820 unsigned int cmd = MFC_PUT_CMD;
821
822 /* Save, Step 50:
823 * Issue a DMA command to copy the first 16K bytes
824 * of local storage to the CSA.
825 */
826 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
827}
828
829static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
830{
831 struct spu_problem __iomem *prob = spu->problem;
832
833 /* Save, Step 51:
834 * Restore, Step 31.
835 * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
836 * point address of context save code in local
837 * storage.
838 *
839 * This implementation uses SPU-side save/restore
840 * programs with entry points at LSA of 0.
841 */
842 out_be32(&prob->spu_npc_RW, 0);
843 eieio();
844}
845
846static inline void set_signot1(struct spu_state *csa, struct spu *spu)
847{
848 struct spu_problem __iomem *prob = spu->problem;
849 union {
850 u64 ull;
851 u32 ui[2];
852 } addr64;
853
854 /* Save, Step 52:
855 * Restore, Step 32:
856 * Write SPU_Sig_Notify_1 register with upper 32-bits
857 * of the CSA.LSCSA effective address.
858 */
859 addr64.ull = (u64) csa->lscsa;
860 out_be32(&prob->signal_notify1, addr64.ui[0]);
861 eieio();
862}
863
864static inline void set_signot2(struct spu_state *csa, struct spu *spu)
865{
866 struct spu_problem __iomem *prob = spu->problem;
867 union {
868 u64 ull;
869 u32 ui[2];
870 } addr64;
871
872 /* Save, Step 53:
873 * Restore, Step 33:
874 * Write SPU_Sig_Notify_2 register with lower 32-bits
875 * of the CSA.LSCSA effective address.
876 */
877 addr64.ull = (u64) csa->lscsa;
878 out_be32(&prob->signal_notify2, addr64.ui[1]);
879 eieio();
880}
881
882static inline void send_save_code(struct spu_state *csa, struct spu *spu)
883{
884 unsigned long addr = (unsigned long)&spu_save_code[0];
885 unsigned int ls_offset = 0x0;
886 unsigned int size = sizeof(spu_save_code);
887 unsigned int tag = 0;
888 unsigned int rclass = 0;
889 unsigned int cmd = MFC_GETFS_CMD;
890
891 /* Save, Step 54:
892 * Issue a DMA command to copy context save code
893 * to local storage and start SPU.
894 */
895 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
896}
897
898static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
899{
900 struct spu_problem __iomem *prob = spu->problem;
901
902 /* Save, Step 55:
903 * Restore, Step 38.
904 * Write PPU_QueryMask=1 (enable Tag Group 0)
905 * and issue eieio instruction.
906 */
907 out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
908 eieio();
909}
910
911static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
912{
913 struct spu_problem __iomem *prob = spu->problem;
914 u32 mask = MFC_TAGID_TO_TAGMASK(0);
915 unsigned long flags;
916
917 /* Save, Step 56:
918 * Restore, Step 39.
920 * Restore, Step 46.
921 * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
922 * or write PPU_QueryType[TS]=01 and wait for Tag Group
923 * Complete Interrupt. Write INT_Stat_Class0 or
924 * INT_Stat_Class2 with value of 'handled'.
925 */
926 POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
927
928 local_irq_save(flags);
929 spu_int_stat_clear(spu, 0, ~(0ul));
930 spu_int_stat_clear(spu, 2, ~(0ul));
931 local_irq_restore(flags);
932}
933
934static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
935{
936 struct spu_problem __iomem *prob = spu->problem;
937 unsigned long flags;
938
939 /* Save, Step 57:
940 * Restore, Step 40.
941 * Poll until SPU_Status[R]=0 or wait for SPU Class 0
942 * or SPU Class 2 interrupt. Write INT_Stat_class0
943 * or INT_Stat_class2 with value of handled.
944 */
945 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
946
947 local_irq_save(flags);
948 spu_int_stat_clear(spu, 0, ~(0ul));
949 spu_int_stat_clear(spu, 2, ~(0ul));
950 local_irq_restore(flags);
951}
952
953static inline int check_save_status(struct spu_state *csa, struct spu *spu)
954{
955 struct spu_problem __iomem *prob = spu->problem;
956 u32 complete;
957
958 /* Save, Step 54:
959 * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
960 * context save succeeded, otherwise context save
961 * failed.
962 */
963 complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
964 SPU_STATUS_STOPPED_BY_STOP);
965 return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
966}
967
968static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
969{
970 /* Restore, Step 4:
971 * If required, notify the "using application" that
972 * the SPU task has been terminated. TBD.
973 */
974}
975
976static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
977{
978 struct spu_priv2 __iomem *priv2 = spu->priv2;
979
980 /* Restore, Step 7:
981 * Restore, Step 47.
982 * Write MFC_Cntl[Dh,Sc]='1','1' to suspend
983 * the queue and halt the decrementer.
984 */
985 out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
986 MFC_CNTL_DECREMENTER_HALTED);
987 eieio();
988}
989
990static inline void wait_suspend_mfc_complete(struct spu_state *csa,
991 struct spu *spu)
992{
993 struct spu_priv2 __iomem *priv2 = spu->priv2;
994
995 /* Restore, Step 8:
996 * Restore, Step 47.
997 * Poll MFC_CNTL[Ss] until 11 is returned.
998 */
999 POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
1000 MFC_CNTL_SUSPEND_COMPLETE);
1001}
1002
1003static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
1004{
1005 struct spu_problem __iomem *prob = spu->problem;
1006
1007 /* Restore, Step 9:
1008 * If SPU_Status[R]=1, stop SPU execution
1009 * and wait for stop to complete.
1010 *
1011 * Returns 1 if SPU_Status[R]=1 on entry.
1012 * 0 otherwise
1013 */
1014 if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
1015 if (in_be32(&prob->spu_status_R) &
1016 SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
1017 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1018 SPU_STATUS_RUNNING);
1019 }
1020 if ((in_be32(&prob->spu_status_R) &
1021 SPU_STATUS_ISOLATED_LOAD_STAUTUS)
1022 || (in_be32(&prob->spu_status_R) &
1023 SPU_STATUS_ISOLATED_STATE)) {
1024 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1025 eieio();
1026 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1027 SPU_STATUS_RUNNING);
1028 out_be32(&prob->spu_runcntl_RW, 0x2);
1029 eieio();
1030 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1031 SPU_STATUS_RUNNING);
1032 }
1033 if (in_be32(&prob->spu_status_R) &
1034 SPU_STATUS_WAITING_FOR_CHANNEL) {
1035 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1036 eieio();
1037 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1038 SPU_STATUS_RUNNING);
1039 }
1040 return 1;
1041 }
1042 return 0;
1043}
1044
1045static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1046{
1047 struct spu_problem __iomem *prob = spu->problem;
1048
1049 /* Restore, Step 10:
1050 * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
1051 * release SPU from isolate state.
1052 */
1053 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
1054 if (in_be32(&prob->spu_status_R) &
1055 SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
1056 spu_mfc_sr1_set(spu,
1057 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1058 eieio();
1059 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1060 eieio();
1061 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1062 SPU_STATUS_RUNNING);
1063 }
1064 if ((in_be32(&prob->spu_status_R) &
1065 SPU_STATUS_ISOLATED_LOAD_STAUTUS)
1066 || (in_be32(&prob->spu_status_R) &
1067 SPU_STATUS_ISOLATED_STATE)) {
1068 spu_mfc_sr1_set(spu,
1069 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1070 eieio();
1071 out_be32(&prob->spu_runcntl_RW, 0x2);
1072 eieio();
1073 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1074 SPU_STATUS_RUNNING);
1075 }
1076 }
1077}
1078
1079static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
1080{
1081 struct spu_priv2 __iomem *priv2 = spu->priv2;
1082 u64 ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1083 u64 idx;
1084 int i;
1085
1086 /* Restore, Step 20:
1087 * Reset the following CH: [0,1,3,4,24,25,27]
1088 */
1089 for (i = 0; i < 7; i++) {
1090 idx = ch_indices[i];
1091 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1092 eieio();
1093 out_be64(&priv2->spu_chnldata_RW, 0UL);
1094 out_be64(&priv2->spu_chnlcnt_RW, 0UL);
1095 eieio();
1096 }
1097}
1098
1099static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
1100{
1101 struct spu_priv2 __iomem *priv2 = spu->priv2;
1102 u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
1103 u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
1104 u64 idx;
1105 int i;
1106
1107 /* Restore, Step 21:
1108 * Reset the following CH: [21, 23, 28, 29, 30]
1109 */
1110 for (i = 0; i < 5; i++) {
1111 idx = ch_indices[i];
1112 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1113 eieio();
1114 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
1115 eieio();
1116 }
1117}
1118
1119static inline void setup_spu_status_part1(struct spu_state *csa,
1120 struct spu *spu)
1121{
1122 u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
1123 u32 status_I = SPU_STATUS_INVALID_INSTR;
1124 u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
1125 u32 status_S = SPU_STATUS_SINGLE_STEP;
1126 u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
1127 u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
1128 u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
1129 u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
1130 u32 status_code;
1131
1132 /* Restore, Step 27:
1133 * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
1134 * instruction sequence to the end of the SPU based restore
1135 * code (after the "context restored" stop and signal) to
1136 * restore the correct SPU status.
1137 *
1138 * NOTE: Rather than modifying the SPU executable, we
1139 * instead add a new 'stopped_status' field to the
1140 * LSCSA. The SPU-side restore reads this field and
1141 * takes the appropriate action when exiting.
1142 */
1143
1144 status_code =
1145 (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
1146 if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {
1147
1148 /* SPU_Status[P,I]=1 - Illegal Instruction followed
1149 * by Stop and Signal instruction, followed by 'br -4'.
1150 *
1151 */
1152 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
1153 csa->lscsa->stopped_status.slot[1] = status_code;
1154
1155 } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {
1156
1157 /* SPU_Status[P,H]=1 - Halt Conditional, followed
1158 * by Stop and Signal instruction, followed by
1159 * 'br -4'.
1160 */
1161 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
1162 csa->lscsa->stopped_status.slot[1] = status_code;
1163
1164 } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {
1165
1166 /* SPU_Status[S,P]=1 - Stop and Signal instruction
1167 * followed by 'br -4'.
1168 */
1169 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
1170 csa->lscsa->stopped_status.slot[1] = status_code;
1171
1172 } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {
1173
1174 /* SPU_Status[S,I]=1 - Illegal instruction followed
1175 * by 'br -4'.
1176 */
1177 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
1178 csa->lscsa->stopped_status.slot[1] = status_code;
1179
1180 } else if ((csa->prob.spu_status_R & status_P) == status_P) {
1181
1182 /* SPU_Status[P]=1 - Stop and Signal instruction
1183 * followed by 'br -4'.
1184 */
1185 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
1186 csa->lscsa->stopped_status.slot[1] = status_code;
1187
1188 } else if ((csa->prob.spu_status_R & status_H) == status_H) {
1189
1190 /* SPU_Status[H]=1 - Halt Conditional, followed
1191 * by 'br -4'.
1192 */
1193 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;
1194
1195 } else if ((csa->prob.spu_status_R & status_S) == status_S) {
1196
1197 /* SPU_Status[S]=1 - Two nop instructions.
1198 */
1199 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;
1200
1201 } else if ((csa->prob.spu_status_R & status_I) == status_I) {
1202
1203 /* SPU_Status[I]=1 - Illegal instruction followed
1204 * by 'br -4'.
1205 */
1206 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;
1207
1208 }
1209}
1210
1211static inline void setup_spu_status_part2(struct spu_state *csa,
1212 struct spu *spu)
1213{
1214 u32 mask;
1215
1216 /* Restore, Step 28:
1217 * If the CSA.SPU_Status[I,S,H,P,R]=0 then
1218 * add a 'br *' instruction to the end of
1219 * the SPU based restore code.
1220 *
1221 * NOTE: Rather than modifying the SPU executable, we
1222 * instead add a new 'stopped_status' field to the
1223 * LSCSA. The SPU-side restore reads this field and
1224 * takes the appropriate action when exiting.
1225 */
1226 mask = SPU_STATUS_INVALID_INSTR |
1227 SPU_STATUS_SINGLE_STEP |
1228 SPU_STATUS_STOPPED_BY_HALT |
1229 SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
1230 if (!(csa->prob.spu_status_R & mask)) {
1231 csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
1232 }
1233}
1234
1235static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
1236{
1237 /* Restore, Step 29:
1238 * Restore RA_GROUP_ID register and the
1239 * RA_ENABLE register from the CSA.
1240 */
1241 spu_resource_allocation_groupID_set(spu,
1242 csa->priv1.resource_allocation_groupID_RW);
1243 spu_resource_allocation_enable_set(spu,
1244 csa->priv1.resource_allocation_enable_RW);
1245}
1246
1247static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
1248{
1249 unsigned long addr = (unsigned long)&spu_restore_code[0];
1250 unsigned int ls_offset = 0x0;
1251 unsigned int size = sizeof(spu_restore_code);
1252 unsigned int tag = 0;
1253 unsigned int rclass = 0;
1254 unsigned int cmd = MFC_GETFS_CMD;
1255
1256 /* Restore, Step 37:
1257 * Issue MFC DMA command to copy context
1258 * restore code to local storage.
1259 */
1260 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
1261}
1262
1263static inline void setup_decr(struct spu_state *csa, struct spu *spu)
1264{
1265 /* Restore, Step 34:
1266 * If CSA.MFC_CNTL[Ds]=1 (decrementer was
1267 * running) then adjust decrementer, set
1268 * decrementer running status in LSCSA,
1269 * and set decrementer "wrapped" status
1270 * in LSCSA.
1271 */
1272 if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
1273 cycles_t resume_time = get_cycles();
1274 cycles_t delta_time = resume_time - csa->suspend_time;
1275
1276 csa->lscsa->decr.slot[0] = delta_time;
1277 }
1278}
1279
1280static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
1281{
1282 /* Restore, Step 35:
1283 * Copy the CSA.PU_MB data into the LSCSA.
1284 */
1285 csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
1286}
1287
1288static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
1289{
1290 /* Restore, Step 36:
1291 * Copy the CSA.PUINT_MB data into the LSCSA.
1292 */
1293 csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
1294}
1295
1296static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
1297{
1298 struct spu_problem __iomem *prob = spu->problem;
1299 u32 complete;
1300
1301 /* Restore, Step 40:
1302 * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
1303 * context restore succeeded, otherwise context restore
1304 * failed.
1305 */
1306 complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
1307 SPU_STATUS_STOPPED_BY_STOP);
1308 return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
1309}
1310
1311static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
1312{
1313 struct spu_priv2 __iomem *priv2 = spu->priv2;
1314
1315 /* Restore, Step 41:
1316 * Restore SPU_PrivCntl from the CSA.
1317 */
1318 out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
1319 eieio();
1320}
1321
1322static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
1323{
1324 struct spu_problem __iomem *prob = spu->problem;
1325 u32 mask;
1326
1327 /* Restore, Step 42:
1328 * If any CSA.SPU_Status[I,S,H,P]=1, then
1329 * restore the error or single step state.
1330 */
1331 mask = SPU_STATUS_INVALID_INSTR |
1332 SPU_STATUS_SINGLE_STEP |
1333 SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
1334 if (csa->prob.spu_status_R & mask) {
1335 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1336 eieio();
1337 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1338 SPU_STATUS_RUNNING);
1339 }
1340}
1341
1342static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
1343{
1344 struct spu_problem __iomem *prob = spu->problem;
1345 u32 mask;
1346
1347 /* Restore, Step 43:
1348 * If all CSA.SPU_Status[I,S,H,P,R]=0 then write
1349 * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
1350 * then write '00' to SPU_RunCntl[R0R1] and wait
1351 * for SPU_Status[R]=0.
1352 */
1353 mask = SPU_STATUS_INVALID_INSTR |
1354 SPU_STATUS_SINGLE_STEP |
1355 SPU_STATUS_STOPPED_BY_HALT |
1356 SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
1357 if (!(csa->prob.spu_status_R & mask)) {
1358 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1359 eieio();
1360 POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
1361 SPU_STATUS_RUNNING);
1362 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1363 eieio();
1364 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1365 SPU_STATUS_RUNNING);
1366 }
1367}
1368
1369static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
1370{
1371 unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
1372 unsigned int ls_offset = 0x0;
1373 unsigned int size = 16384;
1374 unsigned int tag = 0;
1375 unsigned int rclass = 0;
1376 unsigned int cmd = MFC_GET_CMD;
1377
1378 /* Restore, Step 44:
1379 * Issue a DMA command to restore the first
1380 * 16kb of local storage from CSA.
1381 */
1382 send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
1383}
1384
1385static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
1386{
1387 /* Restore, Step 49:
1388 * Write INT_MASK_class0 with value of 0.
1389 * Write INT_MASK_class1 with value of 0.
1390 * Write INT_MASK_class2 with value of 0.
1391 * Write INT_STAT_class0 with value of -1.
1392 * Write INT_STAT_class1 with value of -1.
1393 * Write INT_STAT_class2 with value of -1.
1394 */
1395 spin_lock_irq(&spu->register_lock);
1396 spu_int_mask_set(spu, 0, 0ul);
1397 spu_int_mask_set(spu, 1, 0ul);
1398 spu_int_mask_set(spu, 2, 0ul);
1399 spu_int_stat_clear(spu, 0, ~0ul);
1400 spu_int_stat_clear(spu, 1, ~0ul);
1401 spu_int_stat_clear(spu, 2, ~0ul);
1402 spin_unlock_irq(&spu->register_lock);
1403}
1404
1405static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
1406{
1407 struct spu_priv2 __iomem *priv2 = spu->priv2;
1408 int i;
1409
1410 /* Restore, Step 50:
1411 * If MFC_Cntl[Se]!=0 then restore
1412 * MFC command queues.
1413 */
1414 if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
1415 for (i = 0; i < 8; i++) {
1416 out_be64(&priv2->puq[i].mfc_cq_data0_RW,
1417 csa->priv2.puq[i].mfc_cq_data0_RW);
1418 out_be64(&priv2->puq[i].mfc_cq_data1_RW,
1419 csa->priv2.puq[i].mfc_cq_data1_RW);
1420 out_be64(&priv2->puq[i].mfc_cq_data2_RW,
1421 csa->priv2.puq[i].mfc_cq_data2_RW);
1422 out_be64(&priv2->puq[i].mfc_cq_data3_RW,
1423 csa->priv2.puq[i].mfc_cq_data3_RW);
1424 }
1425 for (i = 0; i < 16; i++) {
1426 out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
1427 csa->priv2.spuq[i].mfc_cq_data0_RW);
1428 out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
1429 csa->priv2.spuq[i].mfc_cq_data1_RW);
1430 out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
1431 csa->priv2.spuq[i].mfc_cq_data2_RW);
1432 out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
1433 csa->priv2.spuq[i].mfc_cq_data3_RW);
1434 }
1435 }
1436 eieio();
1437}
1438
1439static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
1440{
1441 struct spu_problem __iomem *prob = spu->problem;
1442
1443 /* Restore, Step 51:
1444 * Restore the PPU_QueryMask register from CSA.
1445 */
1446 out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
1447 eieio();
1448}
1449
1450static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
1451{
1452 struct spu_problem __iomem *prob = spu->problem;
1453
1454 /* Restore, Step 52:
1455 * Restore the PPU_QueryType register from CSA.
1456 */
1457 out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
1458 eieio();
1459}
1460
1461static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
1462{
1463 struct spu_priv2 __iomem *priv2 = spu->priv2;
1464
1465 /* Restore, Step 53:
1466 * Restore the MFC_CSR_TSQ register from CSA.
1467 */
1468 out_be64(&priv2->spu_tag_status_query_RW,
1469 csa->priv2.spu_tag_status_query_RW);
1470 eieio();
1471}
1472
1473static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
1474{
1475 struct spu_priv2 __iomem *priv2 = spu->priv2;
1476
1477 /* Restore, Step 54:
1478 * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
1479 * registers from CSA.
1480 */
1481 out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
1482 out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
1483 eieio();
1484}
1485
1486static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
1487{
1488 struct spu_priv2 __iomem *priv2 = spu->priv2;
1489
1490 /* Restore, Step 55:
1491 * Restore the MFC_CSR_ATO register from CSA.
1492 */
1493 out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
1494}
1495
1496static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
1497{
1498 /* Restore, Step 56:
1499 * Restore the MFC_TCLASS_ID register from CSA.
1500 */
1501 spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
1502 eieio();
1503}
1504
1505static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
1506{
1507 u64 ch0_cnt, ch0_data;
1508 u64 ch1_data;
1509
1510 /* Restore, Step 57:
1511 * Set the Lock Line Reservation Lost Event by:
1512 * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
1513 * 2. If CSA.SPU_Channel_0_Count=0 and
1514 * CSA.SPU_Wr_Event_Mask[Lr]=1 and
1515 * CSA.SPU_Event_Status[Lr]=0 then set
1516 * CSA.SPU_Event_Status_Count=1.
1517 */
1518 ch0_cnt = csa->spu_chnlcnt_RW[0];
1519 ch0_data = csa->spu_chnldata_RW[0];
1520 ch1_data = csa->spu_chnldata_RW[1];
1521 csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
1522 if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
1523 (ch1_data & MFC_LLR_LOST_EVENT)) {
1524 csa->spu_chnlcnt_RW[0] = 1;
1525 }
1526}
1527
1528static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
1529{
1530 /* Restore, Step 58:
1531 * If the status of the CSA software decrementer
1532 * "wrapped" flag is set, OR in a '1' to
1533 * CSA.SPU_Event_Status[Tm].
1534 */
1535 if (csa->lscsa->decr_status.slot[0] == 1) {
1536 csa->spu_chnldata_RW[0] |= 0x20;
1537 }
1538 if ((csa->lscsa->decr_status.slot[0] == 1) &&
1539 (csa->spu_chnlcnt_RW[0] == 0 &&
1540 ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
1541 ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
1542 csa->spu_chnlcnt_RW[0] = 1;
1543 }
1544}
1545
1546static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
1547{
1548 struct spu_priv2 __iomem *priv2 = spu->priv2;
1549 u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
1550 int i;
1551
1552 /* Restore, Step 59:
1553 * Restore the following CH: [0,1,3,4,24,25,27]
1554 */
1555 for (i = 0; i < 7; i++) {
1556 idx = ch_indices[i];
1557 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1558 eieio();
1559 out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
1560 out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
1561 eieio();
1562 }
1563}
1564
1565static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
1566{
1567 struct spu_priv2 __iomem *priv2 = spu->priv2;
1568 u64 ch_indices[3] = { 9UL, 21UL, 23UL };
1569	u64 ch_counts[3];
1570 u64 idx;
1571 int i;
1572
1573 /* Restore, Step 60:
1574 * Restore the following CH: [9,21,23].
1575 */
1576 ch_counts[0] = 1UL;
1577 ch_counts[1] = csa->spu_chnlcnt_RW[21];
1578 ch_counts[2] = 1UL;
1579 for (i = 0; i < 3; i++) {
1580 idx = ch_indices[i];
1581 out_be64(&priv2->spu_chnlcntptr_RW, idx);
1582 eieio();
1583 out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
1584 eieio();
1585 }
1586}
1587
1588static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
1589{
1590 struct spu_priv2 __iomem *priv2 = spu->priv2;
1591
1592 /* Restore, Step 61:
1593 * Restore the SPU_LSLR register from CSA.
1594 */
1595 out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
1596 eieio();
1597}
1598
1599static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
1600{
1601 struct spu_priv2 __iomem *priv2 = spu->priv2;
1602
1603 /* Restore, Step 62:
1604 * Restore the SPU_Cfg register from CSA.
1605 */
1606 out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
1607 eieio();
1608}
1609
1610static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
1611{
1612 /* Restore, Step 63:
1613 * Restore PM_Trace_Tag_Wait_Mask from CSA.
1614 * Not performed by this implementation.
1615 */
1616}
1617
1618static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
1619{
1620 struct spu_problem __iomem *prob = spu->problem;
1621
1622 /* Restore, Step 64:
1623 * Restore SPU_NPC from CSA.
1624 */
1625 out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
1626 eieio();
1627}
1628
1629static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
1630{
1631 struct spu_priv2 __iomem *priv2 = spu->priv2;
1632 int i;
1633
1634 /* Restore, Step 65:
1635 * Restore MFC_RdSPU_MB from CSA.
1636 */
1637 out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
1638 eieio();
1639 out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
1640 for (i = 0; i < 4; i++) {
1641 out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
1642 }
1643 eieio();
1644}
1645
1646static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
1647{
1648 struct spu_problem __iomem *prob = spu->problem;
1649 u32 dummy = 0;
1650
1651 /* Restore, Step 66:
1652 * If CSA.MB_Stat[P]=0 (mailbox empty) then
1653 * read from the PPU_MB register.
1654 */
1655 if ((csa->prob.mb_stat_R & 0xFF) == 0) {
1656 dummy = in_be32(&prob->pu_mb_R);
1657 eieio();
1658 }
1659}
1660
1661static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
1662{
1663 struct spu_priv2 __iomem *priv2 = spu->priv2;
1664 u64 dummy = 0UL;
1665
1666	/* Restore, Step 67:
1667 * If CSA.MB_Stat[I]=0 (mailbox empty) then
1668 * read from the PPUINT_MB register.
1669 */
1670 if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
1671 dummy = in_be64(&priv2->puint_mb_R);
1672 eieio();
1673 spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
1674 eieio();
1675 }
1676}
1677
1678static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
1679{
1680 struct spu_priv2 __iomem *priv2 = spu->priv2;
1681 int i;
1682
1683 /* Restore, Step 68:
1684 * If MFC_SR1[R]='1', restore SLBs from CSA.
1685 */
1686 if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) {
1687 for (i = 0; i < 8; i++) {
1688 out_be64(&priv2->slb_index_W, i);
1689 eieio();
1690 out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]);
1691 out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]);
1692 eieio();
1693 }
1694 out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W);
1695 eieio();
1696 }
1697}
1698
1699static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
1700{
1701 /* Restore, Step 69:
1702 * Restore the MFC_SR1 register from CSA.
1703 */
1704 spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
1705 eieio();
1706}
1707
1708static inline void restore_other_spu_access(struct spu_state *csa,
1709 struct spu *spu)
1710{
1711 /* Restore, Step 70:
1712 * Restore other SPU mappings to this SPU. TBD.
1713 */
1714}
1715
1716static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
1717{
1718 struct spu_problem __iomem *prob = spu->problem;
1719
1720 /* Restore, Step 71:
1721 * If CSA.SPU_Status[R]=1 then write
1722 * SPU_RunCntl[R0R1]='01'.
1723 */
1724 if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
1725 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1726 eieio();
1727 }
1728}
1729
1730static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
1731{
1732 struct spu_priv2 __iomem *priv2 = spu->priv2;
1733
1734 /* Restore, Step 72:
1735 * Restore the MFC_CNTL register for the CSA.
1736 */
1737 out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
1738 eieio();
1739}
1740
1741static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
1742{
1743 /* Restore, Step 73:
1744 * Enable user-space access (if provided) to this
1745 * SPU by mapping the virtual pages assigned to
1746 * the SPU memory-mapped I/O (MMIO) for problem
1747 * state. TBD.
1748 */
1749}
1750
1751static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
1752{
1753 /* Restore, Step 74:
1754 * Reset the "context switch active" flag.
1755 */
1756 clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
1757 mb();
1758}
1759
1760static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
1761{
1762 /* Restore, Step 75:
1763 * Re-enable SPU interrupts.
1764 */
1765 spin_lock_irq(&spu->register_lock);
1766 spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
1767 spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
1768 spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
1769 spin_unlock_irq(&spu->register_lock);
1770}
1771
1772static int quiesce_spu(struct spu_state *prev, struct spu *spu)
1773{
1774	/*
1775	 * Combines steps 2-18 of the SPU context save sequence, which
1776 * quiesce the SPU state (disable SPU execution, MFC command
1777 * queues, decrementer, SPU interrupts, etc.).
1778 *
1779 * Returns 0 on success.
1780 * 2 if failed step 2.
1781 * 6 if failed step 6.
1782 */
1783
1784 if (check_spu_isolate(prev, spu)) { /* Step 2. */
1785 return 2;
1786 }
1787 disable_interrupts(prev, spu); /* Step 3. */
1788 set_watchdog_timer(prev, spu); /* Step 4. */
1789 inhibit_user_access(prev, spu); /* Step 5. */
1790 if (check_spu_isolate(prev, spu)) { /* Step 6. */
1791 return 6;
1792 }
1793 set_switch_pending(prev, spu); /* Step 7. */
1794 save_mfc_cntl(prev, spu); /* Step 8. */
1795 save_spu_runcntl(prev, spu); /* Step 9. */
1796 save_mfc_sr1(prev, spu); /* Step 10. */
1797 save_spu_status(prev, spu); /* Step 11. */
1798 save_mfc_decr(prev, spu); /* Step 12. */
1799 halt_mfc_decr(prev, spu); /* Step 13. */
1800 save_timebase(prev, spu); /* Step 14. */
1801 remove_other_spu_access(prev, spu); /* Step 15. */
1802 do_mfc_mssync(prev, spu); /* Step 16. */
1803 issue_mfc_tlbie(prev, spu); /* Step 17. */
1804 handle_pending_interrupts(prev, spu); /* Step 18. */
1805
1806 return 0;
1807}
1808
1809static void save_csa(struct spu_state *prev, struct spu *spu)
1810{
1811 /*
1812	 * Combines steps 19-45 of the SPU context save sequence, which
1813	 * save regions of the privileged & problem state areas.
1814 */
1815
1816 save_mfc_queues(prev, spu); /* Step 19. */
1817 save_ppu_querymask(prev, spu); /* Step 20. */
1818 save_ppu_querytype(prev, spu); /* Step 21. */
1819 save_mfc_csr_tsq(prev, spu); /* Step 22. */
1820 save_mfc_csr_cmd(prev, spu); /* Step 23. */
1821 save_mfc_csr_ato(prev, spu); /* Step 24. */
1822 save_mfc_tclass_id(prev, spu); /* Step 25. */
1823 set_mfc_tclass_id(prev, spu); /* Step 26. */
1824 purge_mfc_queue(prev, spu); /* Step 27. */
1825 wait_purge_complete(prev, spu); /* Step 28. */
1826 save_mfc_slbs(prev, spu); /* Step 29. */
1827 setup_mfc_sr1(prev, spu); /* Step 30. */
1828 save_spu_npc(prev, spu); /* Step 31. */
1829 save_spu_privcntl(prev, spu); /* Step 32. */
1830 reset_spu_privcntl(prev, spu); /* Step 33. */
1831 save_spu_lslr(prev, spu); /* Step 34. */
1832 reset_spu_lslr(prev, spu); /* Step 35. */
1833 save_spu_cfg(prev, spu); /* Step 36. */
1834 save_pm_trace(prev, spu); /* Step 37. */
1835 save_mfc_rag(prev, spu); /* Step 38. */
1836 save_ppu_mb_stat(prev, spu); /* Step 39. */
1837 save_ppu_mb(prev, spu); /* Step 40. */
1838 save_ppuint_mb(prev, spu); /* Step 41. */
1839 save_ch_part1(prev, spu); /* Step 42. */
1840 save_spu_mb(prev, spu); /* Step 43. */
1841 save_mfc_cmd(prev, spu); /* Step 44. */
1842 reset_ch(prev, spu); /* Step 45. */
1843}
1844
1845static void save_lscsa(struct spu_state *prev, struct spu *spu)
1846{
1847 /*
1848	 * Performs steps 46-57 of the SPU context save sequence,
1849 * which save regions of the local store and register
1850 * file.
1851 */
1852
1853 resume_mfc_queue(prev, spu); /* Step 46. */
1854 setup_mfc_slbs(prev, spu); /* Step 47. */
1855 set_switch_active(prev, spu); /* Step 48. */
1856 enable_interrupts(prev, spu); /* Step 49. */
1857 save_ls_16kb(prev, spu); /* Step 50. */
1858 set_spu_npc(prev, spu); /* Step 51. */
1859 set_signot1(prev, spu); /* Step 52. */
1860 set_signot2(prev, spu); /* Step 53. */
1861 send_save_code(prev, spu); /* Step 54. */
1862 set_ppu_querymask(prev, spu); /* Step 55. */
1863 wait_tag_complete(prev, spu); /* Step 56. */
1864 wait_spu_stopped(prev, spu); /* Step 57. */
1865}
1866
1867static void harvest(struct spu_state *prev, struct spu *spu)
1868{
1869 /*
1870	 * Performs steps 2-25 of the SPU context restore sequence,
1871	 * which resets an SPU either after a failed save or
1872	 * when using an SPU for the first time.
1873 */
1874
1875 disable_interrupts(prev, spu); /* Step 2. */
1876 inhibit_user_access(prev, spu); /* Step 3. */
1877 terminate_spu_app(prev, spu); /* Step 4. */
1878 set_switch_pending(prev, spu); /* Step 5. */
1879 remove_other_spu_access(prev, spu); /* Step 6. */
1880 suspend_mfc(prev, spu); /* Step 7. */
1881 wait_suspend_mfc_complete(prev, spu); /* Step 8. */
1882 if (!suspend_spe(prev, spu)) /* Step 9. */
1883 clear_spu_status(prev, spu); /* Step 10. */
1884 do_mfc_mssync(prev, spu); /* Step 11. */
1885 issue_mfc_tlbie(prev, spu); /* Step 12. */
1886 handle_pending_interrupts(prev, spu); /* Step 13. */
1887 purge_mfc_queue(prev, spu); /* Step 14. */
1888 wait_purge_complete(prev, spu); /* Step 15. */
1889 reset_spu_privcntl(prev, spu); /* Step 16. */
1890 reset_spu_lslr(prev, spu); /* Step 17. */
1891 setup_mfc_sr1(prev, spu); /* Step 18. */
1892 invalidate_slbs(prev, spu); /* Step 19. */
1893 reset_ch_part1(prev, spu); /* Step 20. */
1894 reset_ch_part2(prev, spu); /* Step 21. */
1895 enable_interrupts(prev, spu); /* Step 22. */
1896 set_switch_active(prev, spu); /* Step 23. */
1897 set_mfc_tclass_id(prev, spu); /* Step 24. */
1898 resume_mfc_queue(prev, spu); /* Step 25. */
1899}
1900
1901static void restore_lscsa(struct spu_state *next, struct spu *spu)
1902{
1903 /*
1904	 * Performs steps 26-40 of the SPU context restore sequence,
1905	 * which restore regions of the local store and register
1906 * file.
1907 */
1908
1909 set_watchdog_timer(next, spu); /* Step 26. */
1910 setup_spu_status_part1(next, spu); /* Step 27. */
1911 setup_spu_status_part2(next, spu); /* Step 28. */
1912 restore_mfc_rag(next, spu); /* Step 29. */
1913 setup_mfc_slbs(next, spu); /* Step 30. */
1914 set_spu_npc(next, spu); /* Step 31. */
1915 set_signot1(next, spu); /* Step 32. */
1916 set_signot2(next, spu); /* Step 33. */
1917 setup_decr(next, spu); /* Step 34. */
1918 setup_ppu_mb(next, spu); /* Step 35. */
1919 setup_ppuint_mb(next, spu); /* Step 36. */
1920 send_restore_code(next, spu); /* Step 37. */
1921 set_ppu_querymask(next, spu); /* Step 38. */
1922 wait_tag_complete(next, spu); /* Step 39. */
1923 wait_spu_stopped(next, spu); /* Step 40. */
1924}
1925
1926static void restore_csa(struct spu_state *next, struct spu *spu)
1927{
1928 /*
1929	 * Combines steps 41-76 of the SPU context restore sequence, which
1930	 * restore regions of the privileged & problem state areas.
1931 */
1932
1933 restore_spu_privcntl(next, spu); /* Step 41. */
1934 restore_status_part1(next, spu); /* Step 42. */
1935 restore_status_part2(next, spu); /* Step 43. */
1936 restore_ls_16kb(next, spu); /* Step 44. */
1937 wait_tag_complete(next, spu); /* Step 45. */
1938 suspend_mfc(next, spu); /* Step 46. */
1939 wait_suspend_mfc_complete(next, spu); /* Step 47. */
1940 issue_mfc_tlbie(next, spu); /* Step 48. */
1941 clear_interrupts(next, spu); /* Step 49. */
1942 restore_mfc_queues(next, spu); /* Step 50. */
1943 restore_ppu_querymask(next, spu); /* Step 51. */
1944 restore_ppu_querytype(next, spu); /* Step 52. */
1945 restore_mfc_csr_tsq(next, spu); /* Step 53. */
1946 restore_mfc_csr_cmd(next, spu); /* Step 54. */
1947 restore_mfc_csr_ato(next, spu); /* Step 55. */
1948 restore_mfc_tclass_id(next, spu); /* Step 56. */
1949 set_llr_event(next, spu); /* Step 57. */
1950 restore_decr_wrapped(next, spu); /* Step 58. */
1951 restore_ch_part1(next, spu); /* Step 59. */
1952 restore_ch_part2(next, spu); /* Step 60. */
1953 restore_spu_lslr(next, spu); /* Step 61. */
1954 restore_spu_cfg(next, spu); /* Step 62. */
1955 restore_pm_trace(next, spu); /* Step 63. */
1956 restore_spu_npc(next, spu); /* Step 64. */
1957 restore_spu_mb(next, spu); /* Step 65. */
1958 check_ppu_mb_stat(next, spu); /* Step 66. */
1959 check_ppuint_mb_stat(next, spu); /* Step 67. */
1960 restore_mfc_slbs(next, spu); /* Step 68. */
1961 restore_mfc_sr1(next, spu); /* Step 69. */
1962 restore_other_spu_access(next, spu); /* Step 70. */
1963 restore_spu_runcntl(next, spu); /* Step 71. */
1964 restore_mfc_cntl(next, spu); /* Step 72. */
1965 enable_user_access(next, spu); /* Step 73. */
1966 reset_switch_active(next, spu); /* Step 74. */
1967 reenable_interrupts(next, spu); /* Step 75. */
1968}
1969
1970static int __do_spu_save(struct spu_state *prev, struct spu *spu)
1971{
1972 int rc;
1973
1974 /*
1975 * SPU context save can be broken into three phases:
1976 *
1977	 *     (a) quiesce [steps 2-18].
1978	 *     (b) save of CSA, performed by PPE [steps 19-45].
1979	 *     (c) save of LSCSA, mostly performed by SPU [steps 46-57].
1980 *
1981 * Returns 0 on success.
1982	 *              2,6 if failed to quiesce SPU
1983 * 53 if SPU-side of save failed.
1984 */
1985
1986	rc = quiesce_spu(prev, spu);	        /* Steps 2-18. */
1987 switch (rc) {
1988 default:
1989 case 2:
1990 case 6:
1991 harvest(prev, spu);
1992 return rc;
1993 break;
1994 case 0:
1995 break;
1996 }
1997	save_csa(prev, spu);	                /* Steps 19-45. */
1998	save_lscsa(prev, spu);	                /* Steps 46-57. */
1999 return check_save_status(prev, spu); /* Step 54. */
2000}
2001
2002static int __do_spu_restore(struct spu_state *next, struct spu *spu)
2003{
2004 int rc;
2005
2006 /*
2007 * SPU context restore can be broken into three phases:
2008 *
2009	 *     (a) harvest (or reset) SPU [steps 2-25].
2010	 *     (b) restore LSCSA [steps 26-40], mostly performed by SPU.
2011 * (c) restore CSA [steps 41-76], performed by PPE.
2012 *
2013	 * The 'harvest' step is not performed here; the caller
2014	 * (spu_restore) resets the SPU via harvest() first.
2015 */
2016
2017	restore_lscsa(next, spu);	        /* Steps 26-40. */
2018 rc = check_restore_status(next, spu); /* Step 40. */
2019 switch (rc) {
2020 default:
2021 /* Failed. Return now. */
2022 return rc;
2023 break;
2024 case 0:
2025 /* Fall through to next step. */
2026 break;
2027 }
2028 restore_csa(next, spu);
2029
2030 return 0;
2031}
2032
2033/**
2034 * spu_save - SPU context save, with locking.
2035 * @prev: pointer to SPU context save area, to be saved.
2036 * @spu: pointer to SPU iomem structure.
2037 *
2038 * Acquire locks, perform the save operation then return.
2039 */
2040int spu_save(struct spu_state *prev, struct spu *spu)
2041{
2042 int rc;
2043
2044 acquire_spu_lock(spu); /* Step 1. */
2045 rc = __do_spu_save(prev, spu); /* Steps 2-53. */
2046 release_spu_lock(spu);
2047 if (rc) {
2048 panic("%s failed on SPU[%d], rc=%d.\n",
2049 __func__, spu->number, rc);
2050 }
2051 return rc;
2052}
2053
2054/**
2055 * spu_restore - SPU context restore, with harvest and locking.
2056 * @new: pointer to SPU context save area, to be restored.
2057 * @spu: pointer to SPU iomem structure.
2058 *
2059 * Perform harvest + restore, as we may not be coming
2060 * from a previous successful save operation, and the
2061 * hardware state is unknown.
2062 */
2063int spu_restore(struct spu_state *new, struct spu *spu)
2064{
2065 int rc;
2066
2067 acquire_spu_lock(spu);
2068 harvest(NULL, spu);
2069 spu->stop_code = 0;
2070 spu->dar = 0;
2071 spu->dsisr = 0;
2072 spu->slb_replace = 0;
2073 spu->class_0_pending = 0;
2074 rc = __do_spu_restore(new, spu);
2075 release_spu_lock(spu);
2076 if (rc) {
2077 panic("%s failed on SPU[%d] rc=%d.\n",
2078 __func__, spu->number, rc);
2079 }
2080 return rc;
2081}
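
For orientation, a minimal sketch of how a caller could drive these two entry points across a context switch. The my_spu_ctx wrapper and its fields are assumptions made for illustration; the actual spufs scheduler lives in sched.c and differs in detail:

	/* Illustrative only -- not part of switch.c.  Assumes a context
	 * wrapper that owns a struct spu_state (with its lscsa already
	 * set up by spu_init_csa()).
	 */
	struct my_spu_ctx {
		struct spu_state csa;	/* PPE-side save area */
		struct spu *spu;	/* bound physical SPU, or NULL */
	};

	static void my_spu_switch(struct my_spu_ctx *prev,
				  struct my_spu_ctx *next, struct spu *spu)
	{
		/* spu_save() panics internally on failure, so there is
		 * no error path here.  spu_restore() harvests (resets)
		 * the SPU first, so a clean hardware state is not a
		 * precondition.
		 */
		spu_save(&prev->csa, spu);
		prev->spu = NULL;
		spu_restore(&next->csa, spu);
		next->spu = spu;
	}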
2082
2083/**
2084 * spu_harvest - SPU harvest (reset) operation
2085 * @spu: pointer to SPU iomem structure.
2086 *
2087 * Perform SPU harvest (reset) operation.
2088 */
2089void spu_harvest(struct spu *spu)
2090{
2091 acquire_spu_lock(spu);
2092 harvest(NULL, spu);
2093 release_spu_lock(spu);
2094}
2095
2096static void init_prob(struct spu_state *csa)
2097{
2098 csa->spu_chnlcnt_RW[9] = 1;
2099 csa->spu_chnlcnt_RW[21] = 16;
2100 csa->spu_chnlcnt_RW[23] = 1;
2101 csa->spu_chnlcnt_RW[28] = 1;
2102 csa->spu_chnlcnt_RW[30] = 1;
2103 csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
2104}
2105
2106static void init_priv1(struct spu_state *csa)
2107{
2108 /* Enable decode, relocate, tlbie response, master runcntl. */
2109 csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
2110 MFC_STATE1_MASTER_RUN_CONTROL_MASK |
2111 MFC_STATE1_PROBLEM_STATE_MASK |
2112 MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
2113
2114 /* Set storage description. */
2115 csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);
2116
2117 /* Enable OS-specific set of interrupts. */
2118 csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
2119 CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
2120 CLASS0_ENABLE_SPU_ERROR_INTR;
2121 csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
2122 CLASS1_ENABLE_STORAGE_FAULT_INTR;
2123 csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
2124 CLASS2_ENABLE_SPU_HALT_INTR;
2125}
2126
2127static void init_priv2(struct spu_state *csa)
2128{
2129 csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
2130 csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
2131 MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
2132 MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
2133}
2134
2135/**
2136 * spu_init_csa - allocate and initialize an SPU context save area.
2137 *
2138 * Allocate and initialize the contents of an SPU context save area.
2139 * This includes enabling address translation, interrupt masks, etc.,
2140 * as appropriate for the given OS environment.
2141 *
2142 * Note that storage for the 'lscsa' is allocated separately,
2143 * as it is by far the largest of the context save regions,
2144 * and may need to be pinned or otherwise specially aligned.
2145 */
2146void spu_init_csa(struct spu_state *csa)
2147{
2148 struct spu_lscsa *lscsa;
2149 unsigned char *p;
2150
2151 if (!csa)
2152 return;
2153 memset(csa, 0, sizeof(struct spu_state));
2154
2155 lscsa = vmalloc(sizeof(struct spu_lscsa));
2156 if (!lscsa)
2157 return;
2158
2159 memset(lscsa, 0, sizeof(struct spu_lscsa));
2160 csa->lscsa = lscsa;
2161	spin_lock_init(&csa->register_lock);
2162
2163 /* Set LS pages reserved to allow for user-space mapping. */
2164 for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
2165 SetPageReserved(vmalloc_to_page(p));
2166
2167 init_prob(csa);
2168 init_priv1(csa);
2169 init_priv2(csa);
2170}
2171
2172void spu_fini_csa(struct spu_state *csa)
2173{
2174 /* Clear reserved bit before vfree. */
2175 unsigned char *p;
2176 for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
2177 ClearPageReserved(vmalloc_to_page(p));
2178
2179 vfree(csa->lscsa);
2180}
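
The SetPageReserved()/ClearPageReserved() pairing above is what makes the vmalloc()ed local-store image safe to hand out to user space. A minimal sketch of a nopage handler built on that property; my_ls_nopage and the use of vm_private_data are assumptions for illustration, not part of this file:

	/* Illustrative only.  Assumes <linux/mm.h> and a VMA whose
	 * vm_private_data points at the context's spu_lscsa.  Because
	 * every LS page was SetPageReserved() in spu_init_csa(), pages
	 * obtained via vmalloc_to_page() may be returned to the fault
	 * path directly.
	 */
	static struct page *my_ls_nopage(struct vm_area_struct *vma,
					 unsigned long address, int *type)
	{
		struct spu_lscsa *lscsa = vma->vm_private_data;
		unsigned long offset = address - vma->vm_start;
		struct page *page;

		if (offset >= LS_SIZE)
			return NOPAGE_SIGBUS;

		page = vmalloc_to_page(lscsa->ls + offset);
		get_page(page);
		if (type)
			*type = VM_FAULT_MINOR;
		return page;
	}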
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
new file mode 100644
index 00000000000..d549aa7ebea
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -0,0 +1,101 @@
1#include <linux/file.h>
2#include <linux/fs.h>
3#include <linux/module.h>
4#include <linux/mount.h>
5#include <linux/namei.h>
6
7#include <asm/uaccess.h>
8
9#include "spufs.h"
10
11/**
12 * sys_spu_run - run code loaded into an SPU
13 *
14 * @unpc: next program counter for the SPU
15 * @ustatus: status of the SPU
16 *
17 * This system call transfers the control of execution of a
18 * user space thread to an SPU. It will return when the
19 * SPU has finished executing or when it hits an error
20 * condition and it will be interrupted if a signal needs
21 * to be delivered to a handler in user space.
22 *
23 * The next program counter is set to the passed value
24 * before the SPU starts fetching code and the user space
25 * pointer gets updated with the new value when returning
26 * from kernel space.
27 *
28 * The status value returned from spu_run reflects the
29 * value of the spu_status register after the SPU has stopped.
30 *
31 */
32long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus)
33{
34 long ret;
35 struct spufs_inode_info *i;
36 u32 npc, status;
37
38 ret = -EFAULT;
39 if (get_user(npc, unpc) || get_user(status, ustatus))
40 goto out;
41
42 /* check if this file was created by spu_create */
43 ret = -EINVAL;
44 if (filp->f_op != &spufs_context_fops)
45 goto out;
46
47 i = SPUFS_I(filp->f_dentry->d_inode);
48 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
49
50 if (put_user(npc, unpc) || put_user(status, ustatus))
51 ret = -EFAULT;
52out:
53 return ret;
54}
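
As a usage illustration, a user-space caller would create a context with spu_create, load the SPU program through the files inside the resulting directory, and then hand the context fd to spu_run. Everything below is a sketch: the syscall numbers are placeholders and the loader step is elided:

	/* Hypothetical user-space example -- not part of this patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int run_spu_program(void)
	{
		unsigned int npc = 0, status = 0;
		long rc;
		int ctx_fd;

		/* __NR_spu_create/__NR_spu_run are placeholders here */
		ctx_fd = syscall(__NR_spu_create, "/spu/my_ctx", 0, 0755);
		if (ctx_fd < 0)
			return -1;

		/* ... write the SPU image into /spu/my_ctx/mem ... */

		rc = syscall(__NR_spu_run, ctx_fd, &npc, &status);
		printf("spu_run rc=%ld npc=0x%x status=0x%x\n",
		       rc, npc, status);
		close(ctx_fd);
		return rc < 0 ? -1 : 0;
	}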
55
56#ifndef MODULE
57asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
58{
59 int fput_needed;
60 struct file *filp;
61 long ret;
62
63 ret = -EBADF;
64 filp = fget_light(fd, &fput_needed);
65 if (filp) {
66 ret = do_spu_run(filp, unpc, ustatus);
67 fput_light(filp, fput_needed);
68 }
69
70 return ret;
71}
72#endif
73
74asmlinkage long sys_spu_create(const char __user *pathname,
75 unsigned int flags, mode_t mode)
76{
77 char *tmp;
78 int ret;
79
80 tmp = getname(pathname);
81 ret = PTR_ERR(tmp);
82 if (!IS_ERR(tmp)) {
83 struct nameidata nd;
84
85 ret = path_lookup(tmp, LOOKUP_PARENT|
86 LOOKUP_OPEN|LOOKUP_CREATE, &nd);
87 if (!ret) {
88 ret = spufs_create_thread(&nd, flags, mode);
89 path_release(&nd);
90 }
91 putname(tmp);
92 }
93
94 return ret;
95}
96
97struct spufs_calls spufs_calls = {
98 .create_thread = sys_spu_create,
99 .spu_run = do_spu_run,
100 .owner = THIS_MODULE,
101};
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index dda5f2c72c2..4ec8ba737e7 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -49,7 +49,6 @@
49#include <asm/hydra.h> 49#include <asm/hydra.h>
50#include <asm/sections.h> 50#include <asm/sections.h>
51#include <asm/time.h> 51#include <asm/time.h>
52#include <asm/btext.h>
53#include <asm/i8259.h> 52#include <asm/i8259.h>
54#include <asm/mpic.h> 53#include <asm/mpic.h>
55#include <asm/rtas.h> 54#include <asm/rtas.h>
@@ -58,7 +57,6 @@
58#include "chrp.h" 57#include "chrp.h"
59 58
60void rtas_indicator_progress(char *, unsigned short); 59void rtas_indicator_progress(char *, unsigned short);
61void btext_progress(char *, unsigned short);
62 60
63int _chrp_type; 61int _chrp_type;
64EXPORT_SYMBOL(_chrp_type); 62EXPORT_SYMBOL(_chrp_type);
@@ -264,11 +262,6 @@ void __init chrp_setup_arch(void)
264 ppc_md.set_rtc_time = rtas_set_rtc_time; 262 ppc_md.set_rtc_time = rtas_set_rtc_time;
265 } 263 }
266 264
267#ifdef CONFIG_BOOTX_TEXT
268 if (ppc_md.progress == NULL && boot_text_mapped)
269 ppc_md.progress = btext_progress;
270#endif
271
272#ifdef CONFIG_BLK_DEV_INITRD 265#ifdef CONFIG_BLK_DEV_INITRD
273 /* this is fine for chrp */ 266 /* this is fine for chrp */
274 initrd_below_start_ok = 1; 267 initrd_below_start_ok = 1;
@@ -522,12 +515,3 @@ void __init chrp_init(void)
522 smp_ops = &chrp_smp_ops; 515 smp_ops = &chrp_smp_ops;
523#endif /* CONFIG_SMP */ 516#endif /* CONFIG_SMP */
524} 517}
525
526#ifdef CONFIG_BOOTX_TEXT
527void
528btext_progress(char *s, unsigned short hex)
529{
530 btext_drawstring(s);
531 btext_drawstring("\n");
532}
533#endif /* CONFIG_BOOTX_TEXT */
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index a58daa15368..42e978e4897 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -35,161 +35,138 @@
35#include <linux/irq.h> 35#include <linux/irq.h>
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37 37
38#include <asm/paca.h>
38#include <asm/iseries/hv_types.h> 39#include <asm/iseries/hv_types.h>
39#include <asm/iseries/hv_lp_event.h> 40#include <asm/iseries/hv_lp_event.h>
40#include <asm/iseries/hv_call_xm.h> 41#include <asm/iseries/hv_call_xm.h>
42#include <asm/iseries/it_lp_queue.h>
41 43
42#include "irq.h" 44#include "irq.h"
43#include "call_pci.h" 45#include "call_pci.h"
44 46
45static long Pci_Interrupt_Count; 47#if defined(CONFIG_SMP)
46static long Pci_Event_Count; 48extern void iSeries_smp_message_recv(struct pt_regs *);
47 49#endif
48enum XmPciLpEvent_Subtype {
49 XmPciLpEvent_BusCreated = 0, // PHB has been created
50 XmPciLpEvent_BusError = 1, // PHB has failed
51 XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus
52 XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed
53 XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered
54 XmPciLpEvent_BusRecovered = 12, // PHB has been recovered
55 XmPciLpEvent_UnQuiesceBus = 18, // Secondary bus unqiescing
56 XmPciLpEvent_BridgeError = 21, // Bridge Error
57 XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt
58};
59
60struct XmPciLpEvent_BusInterrupt {
61 HvBusNumber busNumber;
62 HvSubBusNumber subBusNumber;
63};
64 50
65struct XmPciLpEvent_NodeInterrupt { 51enum pci_event_type {
66 HvBusNumber busNumber; 52 pe_bus_created = 0, /* PHB has been created */
67 HvSubBusNumber subBusNumber; 53 pe_bus_error = 1, /* PHB has failed */
68 HvAgentId deviceId; 54 pe_bus_failed = 2, /* Msg to Secondary, Primary failed bus */
55 pe_node_failed = 4, /* Multi-adapter bridge has failed */
56 pe_node_recovered = 5, /* Multi-adapter bridge has recovered */
57 pe_bus_recovered = 12, /* PHB has been recovered */
 58 pe_unquiesce_bus = 18, /* Secondary bus unquiescing */
59 pe_bridge_error = 21, /* Bridge Error */
60 pe_slot_interrupt = 22 /* Slot interrupt */
69}; 61};
70 62
71struct XmPciLpEvent { 63struct pci_event {
72 struct HvLpEvent hvLpEvent; 64 struct HvLpEvent event;
73
74 union { 65 union {
75 u64 alignData; // Align on an 8-byte boundary 66 u64 __align; /* Align on an 8-byte boundary */
76
77 struct { 67 struct {
78 u32 fisr; 68 u32 fisr;
79 HvBusNumber busNumber; 69 HvBusNumber bus_number;
80 HvSubBusNumber subBusNumber; 70 HvSubBusNumber sub_bus_number;
81 HvAgentId deviceId; 71 HvAgentId dev_id;
82 } slotInterrupt; 72 } slot;
83 73 struct {
84 struct XmPciLpEvent_BusInterrupt busFailed; 74 HvBusNumber bus_number;
85 struct XmPciLpEvent_BusInterrupt busRecovered; 75 HvSubBusNumber sub_bus_number;
86 struct XmPciLpEvent_BusInterrupt busCreated; 76 } bus;
87 77 struct {
88 struct XmPciLpEvent_NodeInterrupt nodeFailed; 78 HvBusNumber bus_number;
89 struct XmPciLpEvent_NodeInterrupt nodeRecovered; 79 HvSubBusNumber sub_bus_number;
90 80 HvAgentId dev_id;
91 } eventData; 81 } node;
92 82 } data;
93}; 83};
94 84
95static void intReceived(struct XmPciLpEvent *eventParm, 85static DEFINE_SPINLOCK(pending_irqs_lock);
96 struct pt_regs *regsParm) 86static int num_pending_irqs;
87static int pending_irqs[NR_IRQS];
88
89static void int_received(struct pci_event *event, struct pt_regs *regs)
97{ 90{
98 int irq; 91 int irq;
99#ifdef CONFIG_IRQSTACKS
100 struct thread_info *curtp, *irqtp;
101#endif
102 92
103 ++Pci_Interrupt_Count; 93 switch (event->event.xSubtype) {
104 94 case pe_slot_interrupt:
105 switch (eventParm->hvLpEvent.xSubtype) { 95 irq = event->event.xCorrelationToken;
106 case XmPciLpEvent_SlotInterrupt: 96 if (irq < NR_IRQS) {
107 irq = eventParm->hvLpEvent.xCorrelationToken; 97 spin_lock(&pending_irqs_lock);
108 /* Dispatch the interrupt handlers for this irq */ 98 pending_irqs[irq]++;
109#ifdef CONFIG_IRQSTACKS 99 num_pending_irqs++;
110 /* Switch to the irq stack to handle this */ 100 spin_unlock(&pending_irqs_lock);
111 curtp = current_thread_info(); 101 } else {
112 irqtp = hardirq_ctx[smp_processor_id()]; 102 printk(KERN_WARNING "int_received: bad irq number %d\n",
113 if (curtp != irqtp) { 103 irq);
114 irqtp->task = curtp->task; 104 HvCallPci_eoi(event->data.slot.bus_number,
115 irqtp->flags = 0; 105 event->data.slot.sub_bus_number,
116 call___do_IRQ(irq, regsParm, irqtp); 106 event->data.slot.dev_id);
117 irqtp->task = NULL; 107 }
118 if (irqtp->flags)
119 set_bits(irqtp->flags, &curtp->flags);
120 } else
121#endif
122 __do_IRQ(irq, regsParm);
123 HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
124 eventParm->eventData.slotInterrupt.subBusNumber,
125 eventParm->eventData.slotInterrupt.deviceId);
126 break; 108 break;
127 /* Ignore error recovery events for now */ 109 /* Ignore error recovery events for now */
128 case XmPciLpEvent_BusCreated: 110 case pe_bus_created:
129 printk(KERN_INFO "intReceived: system bus %d created\n", 111 printk(KERN_INFO "int_received: system bus %d created\n",
130 eventParm->eventData.busCreated.busNumber); 112 event->data.bus.bus_number);
131 break; 113 break;
132 case XmPciLpEvent_BusError: 114 case pe_bus_error:
133 case XmPciLpEvent_BusFailed: 115 case pe_bus_failed:
134 printk(KERN_INFO "intReceived: system bus %d failed\n", 116 printk(KERN_INFO "int_received: system bus %d failed\n",
135 eventParm->eventData.busFailed.busNumber); 117 event->data.bus.bus_number);
136 break; 118 break;
137 case XmPciLpEvent_BusRecovered: 119 case pe_bus_recovered:
138 case XmPciLpEvent_UnQuiesceBus: 120 case pe_unquiesce_bus:
139 printk(KERN_INFO "intReceived: system bus %d recovered\n", 121 printk(KERN_INFO "int_received: system bus %d recovered\n",
140 eventParm->eventData.busRecovered.busNumber); 122 event->data.bus.bus_number);
141 break; 123 break;
142 case XmPciLpEvent_NodeFailed: 124 case pe_node_failed:
143 case XmPciLpEvent_BridgeError: 125 case pe_bridge_error:
144 printk(KERN_INFO 126 printk(KERN_INFO
145 "intReceived: multi-adapter bridge %d/%d/%d failed\n", 127 "int_received: multi-adapter bridge %d/%d/%d failed\n",
146 eventParm->eventData.nodeFailed.busNumber, 128 event->data.node.bus_number,
147 eventParm->eventData.nodeFailed.subBusNumber, 129 event->data.node.sub_bus_number,
148 eventParm->eventData.nodeFailed.deviceId); 130 event->data.node.dev_id);
149 break; 131 break;
150 case XmPciLpEvent_NodeRecovered: 132 case pe_node_recovered:
151 printk(KERN_INFO 133 printk(KERN_INFO
152 "intReceived: multi-adapter bridge %d/%d/%d recovered\n", 134 "int_received: multi-adapter bridge %d/%d/%d recovered\n",
153 eventParm->eventData.nodeRecovered.busNumber, 135 event->data.node.bus_number,
154 eventParm->eventData.nodeRecovered.subBusNumber, 136 event->data.node.sub_bus_number,
155 eventParm->eventData.nodeRecovered.deviceId); 137 event->data.node.dev_id);
156 break; 138 break;
157 default: 139 default:
158 printk(KERN_ERR 140 printk(KERN_ERR
159 "intReceived: unrecognized event subtype 0x%x\n", 141 "int_received: unrecognized event subtype 0x%x\n",
160 eventParm->hvLpEvent.xSubtype); 142 event->event.xSubtype);
161 break; 143 break;
162 } 144 }
163} 145}
164 146
165static void XmPciLpEvent_handler(struct HvLpEvent *eventParm, 147static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs)
166 struct pt_regs *regsParm)
167{ 148{
168#ifdef CONFIG_PCI 149 if (event && (event->xType == HvLpEvent_Type_PciIo)) {
169 ++Pci_Event_Count; 150 switch (event->xFlags.xFunction) {
170
171 if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) {
172 switch (eventParm->xFlags.xFunction) {
173 case HvLpEvent_Function_Int: 151 case HvLpEvent_Function_Int:
174 intReceived((struct XmPciLpEvent *)eventParm, regsParm); 152 int_received((struct pci_event *)event, regs);
175 break; 153 break;
176 case HvLpEvent_Function_Ack: 154 case HvLpEvent_Function_Ack:
177 printk(KERN_ERR 155 printk(KERN_ERR
178 "XmPciLpEvent_handler: unexpected ack received\n"); 156 "pci_event_handler: unexpected ack received\n");
179 break; 157 break;
180 default: 158 default:
181 printk(KERN_ERR 159 printk(KERN_ERR
182 "XmPciLpEvent_handler: unexpected event function %d\n", 160 "pci_event_handler: unexpected event function %d\n",
183 (int)eventParm->xFlags.xFunction); 161 (int)event->xFlags.xFunction);
184 break; 162 break;
185 } 163 }
186 } else if (eventParm) 164 } else if (event)
187 printk(KERN_ERR 165 printk(KERN_ERR
188 "XmPciLpEvent_handler: Unrecognized PCI event type 0x%x\n", 166 "pci_event_handler: Unrecognized PCI event type 0x%x\n",
189 (int)eventParm->xType); 167 (int)event->xType);
190 else 168 else
191 printk(KERN_ERR "XmPciLpEvent_handler: NULL event received\n"); 169 printk(KERN_ERR "pci_event_handler: NULL event received\n");
192#endif
193} 170}
194 171
195/* 172/*
@@ -199,20 +176,21 @@ static void XmPciLpEvent_handler(struct HvLpEvent *eventParm,
199void __init iSeries_init_IRQ(void) 176void __init iSeries_init_IRQ(void)
200{ 177{
201 /* Register PCI event handler and open an event path */ 178 /* Register PCI event handler and open an event path */
202 int xRc; 179 int ret;
203 180
204 xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, 181 ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
205 &XmPciLpEvent_handler); 182 &pci_event_handler);
206 if (xRc == 0) { 183 if (ret == 0) {
207 xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); 184 ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
208 if (xRc != 0) 185 if (ret != 0)
209 printk(KERN_ERR "iSeries_init_IRQ: open event path " 186 printk(KERN_ERR "iseries_init_IRQ: open event path "
210 "failed with rc 0x%x\n", xRc); 187 "failed with rc 0x%x\n", ret);
211 } else 188 } else
212 printk(KERN_ERR "iSeries_init_IRQ: register handler " 189 printk(KERN_ERR "iseries_init_IRQ: register handler "
213 "failed with rc 0x%x\n", xRc); 190 "failed with rc 0x%x\n", ret);
214} 191}
215 192
193#define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff)
216#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) 194#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
217#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) 195#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
218#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7) 196#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7)
@@ -221,40 +199,40 @@ void __init iSeries_init_IRQ(void)
221 * This will be called by device drivers (via enable_IRQ) 199 * This will be called by device drivers (via enable_IRQ)
222 * to enable INTA in the bridge interrupt status register. 200 * to enable INTA in the bridge interrupt status register.
223 */ 201 */
224static void iSeries_enable_IRQ(unsigned int irq) 202static void iseries_enable_IRQ(unsigned int irq)
225{ 203{
226 u32 bus, deviceId, function, mask; 204 u32 bus, dev_id, function, mask;
227 const u32 subBus = 0; 205 const u32 sub_bus = 0;
228 unsigned int rirq = virt_irq_to_real_map[irq]; 206 unsigned int rirq = virt_irq_to_real_map[irq];
229 207
230 /* The IRQ has already been locked by the caller */ 208 /* The IRQ has already been locked by the caller */
231 bus = REAL_IRQ_TO_BUS(rirq); 209 bus = REAL_IRQ_TO_BUS(rirq);
232 function = REAL_IRQ_TO_FUNC(rirq); 210 function = REAL_IRQ_TO_FUNC(rirq);
233 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; 211 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
234 212
235 /* Unmask secondary INTA */ 213 /* Unmask secondary INTA */
236 mask = 0x80000000; 214 mask = 0x80000000;
237 HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask); 215 HvCallPci_unmaskInterrupts(bus, sub_bus, dev_id, mask);
238} 216}
239 217
240/* This is called by iSeries_activate_IRQs */ 218/* This is called by iseries_activate_IRQs */
241static unsigned int iSeries_startup_IRQ(unsigned int irq) 219static unsigned int iseries_startup_IRQ(unsigned int irq)
242{ 220{
243 u32 bus, deviceId, function, mask; 221 u32 bus, dev_id, function, mask;
244 const u32 subBus = 0; 222 const u32 sub_bus = 0;
245 unsigned int rirq = virt_irq_to_real_map[irq]; 223 unsigned int rirq = virt_irq_to_real_map[irq];
246 224
247 bus = REAL_IRQ_TO_BUS(rirq); 225 bus = REAL_IRQ_TO_BUS(rirq);
248 function = REAL_IRQ_TO_FUNC(rirq); 226 function = REAL_IRQ_TO_FUNC(rirq);
249 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; 227 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
250 228
251 /* Link the IRQ number to the bridge */ 229 /* Link the IRQ number to the bridge */
252 HvCallXm_connectBusUnit(bus, subBus, deviceId, irq); 230 HvCallXm_connectBusUnit(bus, sub_bus, dev_id, irq);
253 231
254 /* Unmask bridge interrupts in the FISR */ 232 /* Unmask bridge interrupts in the FISR */
255 mask = 0x01010000 << function; 233 mask = 0x01010000 << function;
256 HvCallPci_unmaskFisr(bus, subBus, deviceId, mask); 234 HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask);
257 iSeries_enable_IRQ(irq); 235 iseries_enable_IRQ(irq);
258 return 0; 236 return 0;
259} 237}
260 238
@@ -279,78 +257,115 @@ void __init iSeries_activate_IRQs()
279} 257}
280 258
281/* this is not called anywhere currently */ 259/* this is not called anywhere currently */
282static void iSeries_shutdown_IRQ(unsigned int irq) 260static void iseries_shutdown_IRQ(unsigned int irq)
283{ 261{
284 u32 bus, deviceId, function, mask; 262 u32 bus, dev_id, function, mask;
285 const u32 subBus = 0; 263 const u32 sub_bus = 0;
286 unsigned int rirq = virt_irq_to_real_map[irq]; 264 unsigned int rirq = virt_irq_to_real_map[irq];
287 265
288 /* irq should be locked by the caller */ 266 /* irq should be locked by the caller */
289 bus = REAL_IRQ_TO_BUS(rirq); 267 bus = REAL_IRQ_TO_BUS(rirq);
290 function = REAL_IRQ_TO_FUNC(rirq); 268 function = REAL_IRQ_TO_FUNC(rirq);
291 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; 269 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
292 270
293 /* Invalidate the IRQ number in the bridge */ 271 /* Invalidate the IRQ number in the bridge */
294 HvCallXm_connectBusUnit(bus, subBus, deviceId, 0); 272 HvCallXm_connectBusUnit(bus, sub_bus, dev_id, 0);
295 273
296 /* Mask bridge interrupts in the FISR */ 274 /* Mask bridge interrupts in the FISR */
297 mask = 0x01010000 << function; 275 mask = 0x01010000 << function;
298 HvCallPci_maskFisr(bus, subBus, deviceId, mask); 276 HvCallPci_maskFisr(bus, sub_bus, dev_id, mask);
299} 277}
300 278
301/* 279/*
302 * This will be called by device drivers (via disable_IRQ) 280 * This will be called by device drivers (via disable_IRQ)
303 * to disable INTA in the bridge interrupt status register. 281 * to disable INTA in the bridge interrupt status register.
304 */ 282 */
305static void iSeries_disable_IRQ(unsigned int irq) 283static void iseries_disable_IRQ(unsigned int irq)
306{ 284{
307 u32 bus, deviceId, function, mask; 285 u32 bus, dev_id, function, mask;
308 const u32 subBus = 0; 286 const u32 sub_bus = 0;
309 unsigned int rirq = virt_irq_to_real_map[irq]; 287 unsigned int rirq = virt_irq_to_real_map[irq];
310 288
311 /* The IRQ has already been locked by the caller */ 289 /* The IRQ has already been locked by the caller */
312 bus = REAL_IRQ_TO_BUS(rirq); 290 bus = REAL_IRQ_TO_BUS(rirq);
313 function = REAL_IRQ_TO_FUNC(rirq); 291 function = REAL_IRQ_TO_FUNC(rirq);
314 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; 292 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
315 293
316 /* Mask secondary INTA */ 294 /* Mask secondary INTA */
317 mask = 0x80000000; 295 mask = 0x80000000;
318 HvCallPci_maskInterrupts(bus, subBus, deviceId, mask); 296 HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask);
319} 297}
320 298
321/* 299static void iseries_end_IRQ(unsigned int irq)
322 * This does nothing because there is not enough information
323 * provided to do the EOI HvCall. This is done by XmPciLpEvent.c
324 */
325static void iSeries_end_IRQ(unsigned int irq)
326{ 300{
301 unsigned int rirq = virt_irq_to_real_map[irq];
302
303 HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
304 (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
327} 305}
328 306
329static hw_irq_controller iSeries_IRQ_handler = { 307static hw_irq_controller iSeries_IRQ_handler = {
330 .typename = "iSeries irq controller", 308 .typename = "iSeries irq controller",
331 .startup = iSeries_startup_IRQ, 309 .startup = iseries_startup_IRQ,
332 .shutdown = iSeries_shutdown_IRQ, 310 .shutdown = iseries_shutdown_IRQ,
333 .enable = iSeries_enable_IRQ, 311 .enable = iseries_enable_IRQ,
334 .disable = iSeries_disable_IRQ, 312 .disable = iseries_disable_IRQ,
335 .end = iSeries_end_IRQ 313 .end = iseries_end_IRQ
336}; 314};
337 315
338/* 316/*
339 * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot 317 * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
340 * It calculates the irq value for the slot. 318 * It calculates the irq value for the slot.
341 * Note that subBusNumber is always 0 (at the moment at least). 319 * Note that sub_bus is always 0 (at the moment at least).
342 */ 320 */
343int __init iSeries_allocate_IRQ(HvBusNumber busNumber, 321int __init iSeries_allocate_IRQ(HvBusNumber bus,
344 HvSubBusNumber subBusNumber, HvAgentId deviceId) 322 HvSubBusNumber sub_bus, HvAgentId dev_id)
345{ 323{
346 int virtirq; 324 int virtirq;
347 unsigned int realirq; 325 unsigned int realirq;
348 u8 idsel = (deviceId >> 4); 326 u8 idsel = (dev_id >> 4);
349 u8 function = deviceId & 7; 327 u8 function = dev_id & 7;
350 328
351 realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function; 329 realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
330 + function;
352 virtirq = virt_irq_create_mapping(realirq); 331 virtirq = virt_irq_create_mapping(realirq);
353 332
354 irq_desc[virtirq].handler = &iSeries_IRQ_handler; 333 irq_desc[virtirq].handler = &iSeries_IRQ_handler;
355 return virtirq; 334 return virtirq;
356} 335}
336
337/*
338 * Get the next pending IRQ.
339 */
340int iSeries_get_irq(struct pt_regs *regs)
341{
342 struct paca_struct *lpaca;
343 /* -2 means ignore this interrupt */
344 int irq = -2;
345
346 lpaca = get_paca();
347#ifdef CONFIG_SMP
348 if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
349 lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
350 iSeries_smp_message_recv(regs);
351 }
352#endif /* CONFIG_SMP */
353 if (hvlpevent_is_pending())
354 process_hvlpevents(regs);
355
356 if (num_pending_irqs) {
357 spin_lock(&pending_irqs_lock);
358 for (irq = 0; irq < NR_IRQS; irq++) {
359 if (pending_irqs[irq]) {
360 pending_irqs[irq]--;
361 num_pending_irqs--;
362 break;
363 }
364 }
365 spin_unlock(&pending_irqs_lock);
366 if (irq >= NR_IRQS)
367 irq = -2;
368 }
369
370 return irq;
371}
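
The handler/get_irq split above is a small producer/consumer pattern: the LP-event handler only queues under a spinlock, and the get_irq hook later dequeues one irq per call. Condensed to its essentials (all names here are illustrative):

	static DEFINE_SPINLOCK(demo_lock);
	static int demo_pending[NR_IRQS];
	static int demo_count;

	static void demo_queue(int irq)		/* producer: event time */
	{
		spin_lock(&demo_lock);
		demo_pending[irq]++;
		demo_count++;
		spin_unlock(&demo_lock);
	}

	static int demo_dequeue(void)		/* consumer: get_irq */
	{
		int irq;

		if (!demo_count)
			return -2;	/* nothing pending: ignore */
		spin_lock(&demo_lock);
		for (irq = 0; irq < NR_IRQS; irq++)
			if (demo_pending[irq]) {
				demo_pending[irq]--;
				demo_count--;
				break;
			}
		spin_unlock(&demo_lock);
		return irq < NR_IRQS ? irq : -2;
	}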
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
index 5f643f16ecc..b9c801ba5a4 100644
--- a/arch/powerpc/platforms/iseries/irq.h
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -4,5 +4,6 @@
4extern void iSeries_init_IRQ(void); 4extern void iSeries_init_IRQ(void);
5extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId); 5extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
6extern void iSeries_activate_IRQs(void); 6extern void iSeries_activate_IRQs(void);
7extern int iSeries_get_irq(struct pt_regs *);
7 8
8#endif /* _ISERIES_IRQ_H */ 9#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c
index bb8c91537f3..ea72385aaf0 100644
--- a/arch/powerpc/platforms/iseries/lpardata.c
+++ b/arch/powerpc/platforms/iseries/lpardata.c
@@ -225,3 +225,10 @@ struct ItVpdAreas itVpdAreas = {
225 0,0 225 0,0
226 } 226 }
227}; 227};
228
229struct ItLpRegSave iseries_reg_save[] = {
230 [0 ... (NR_CPUS-1)] = {
231 .xDesc = 0xd397d9e2, /* "LpRS" */
232 .xSize = sizeof(struct ItLpRegSave),
233 },
234};
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index da26639190d..ad5ef80500c 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -571,16 +571,6 @@ static void iSeries_show_cpuinfo(struct seq_file *m)
571 571
572/* 572/*
573 * Document me. 573 * Document me.
574 * and Implement me.
575 */
576static int iSeries_get_irq(struct pt_regs *regs)
577{
578 /* -2 means ignore this interrupt */
579 return -2;
580}
581
582/*
583 * Document me.
584 */ 574 */
585static void iSeries_restart(char *cmd) 575static void iSeries_restart(char *cmd)
586{ 576{
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 7ece8983a10..dd73e38bfb7 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -51,6 +51,7 @@
51#include <asm/pgtable.h> 51#include <asm/pgtable.h>
52#include <asm/bitops.h> 52#include <asm/bitops.h>
53#include <asm/io.h> 53#include <asm/io.h>
54#include <asm/kexec.h>
54#include <asm/pci-bridge.h> 55#include <asm/pci-bridge.h>
55#include <asm/iommu.h> 56#include <asm/iommu.h>
56#include <asm/machdep.h> 57#include <asm/machdep.h>
@@ -191,24 +192,10 @@ static void __init maple_init_early(void)
191 */ 192 */
192 hpte_init_native(); 193 hpte_init_native();
193 194
194 /* Find the serial port */
195 generic_find_legacy_serial_ports(&physport, &default_speed);
196
197 DBG("phys port addr: %lx\n", (long)physport);
198
199 if (physport) {
200 void *comport;
201 /* Map the uart for udbg. */
202 comport = (void *)ioremap(physport, 16);
203 udbg_init_uart(comport, default_speed);
204
205 DBG("Hello World !\n");
206 }
207
208 /* Setup interrupt mapping options */ 195 /* Setup interrupt mapping options */
209 ppc64_interrupt_controller = IC_OPEN_PIC; 196 ppc64_interrupt_controller = IC_OPEN_PIC;
210 197
211 iommu_init_early_u3(); 198 iommu_init_early_dart();
212 199
213 DBG(" <- maple_init_early\n"); 200 DBG(" <- maple_init_early\n");
214} 201}
@@ -270,7 +257,7 @@ static int __init maple_probe(int platform)
270 * occupies having to be broken up so the DART itself is not 257 * occupies having to be broken up so the DART itself is not
271 * part of the cacheable linar mapping 258 * part of the cacheable linar mapping
272 */ 259 */
273 alloc_u3_dart_table(); 260 alloc_dart_table();
274 261
275 return 1; 262 return 1;
276} 263}
@@ -292,4 +279,9 @@ struct machdep_calls __initdata maple_md = {
292 .calibrate_decr = generic_calibrate_decr, 279 .calibrate_decr = generic_calibrate_decr,
293 .progress = maple_progress, 280 .progress = maple_progress,
294 .idle_loop = native_idle, 281 .idle_loop = native_idle,
282#ifdef CONFIG_KEXEC
283 .machine_kexec = default_machine_kexec,
284 .machine_kexec_prepare = default_machine_kexec_prepare,
285 .machine_crash_shutdown = default_machine_crash_shutdown,
286#endif
295}; 287};
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index c9df44fcf57..78093d7f97a 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -1,9 +1,14 @@
1CFLAGS_bootx_init.o += -fPIC
2
1obj-y += pic.o setup.o time.o feature.o pci.o \ 3obj-y += pic.o setup.o time.o feature.o pci.o \
2 sleep.o low_i2c.o cache.o 4 sleep.o low_i2c.o cache.o pfunc_core.o \
5 pfunc_base.o
3obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o 6obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
4obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o 7obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o
5obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o 8obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o
6obj-$(CONFIG_NVRAM) += nvram.o 9obj-$(CONFIG_NVRAM) += nvram.o
7# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff 10# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
8obj-$(CONFIG_PPC64) += nvram.o 11obj-$(CONFIG_PPC64) += nvram.o
12obj-$(CONFIG_PPC32) += bootx_init.o
9obj-$(CONFIG_SMP) += smp.o 13obj-$(CONFIG_SMP) += smp.o
14obj-$(CONFIG_PPC_MERGE) += udbg_scc.o udbg_adb.o
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
new file mode 100644
index 00000000000..fa8b4d7b5de
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -0,0 +1,547 @@
1/*
2 * Early boot support code for BootX bootloader
3 *
4 * Copyright (C) 2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/kernel.h>
14#include <linux/string.h>
15#include <linux/init.h>
16#include <linux/version.h>
17#include <asm/sections.h>
18#include <asm/prom.h>
19#include <asm/page.h>
20#include <asm/bootx.h>
21#include <asm/bootinfo.h>
22#include <asm/btext.h>
23#include <asm/io.h>
24
25#undef DEBUG
26#define SET_BOOT_BAT
27
28#ifdef DEBUG
29#define DBG(fmt...) do { bootx_printf(fmt); } while(0)
30#else
31#define DBG(fmt...) do { } while(0)
32#endif
33
34extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
35
36static unsigned long __initdata bootx_dt_strbase;
37static unsigned long __initdata bootx_dt_strend;
38static unsigned long __initdata bootx_node_chosen;
39static boot_infos_t * __initdata bootx_info;
40static char __initdata bootx_disp_path[256];
41
42/* Is boot-info compatible ? */
43#define BOOT_INFO_IS_COMPATIBLE(bi) \
44 ((bi)->compatible_version <= BOOT_INFO_VERSION)
45#define BOOT_INFO_IS_V2_COMPATIBLE(bi) ((bi)->version >= 2)
46#define BOOT_INFO_IS_V4_COMPATIBLE(bi) ((bi)->version >= 4)
47
48#ifdef CONFIG_BOOTX_TEXT
49static void __init bootx_printf(const char *format, ...)
50{
51 const char *p, *q, *s;
52 va_list args;
53 unsigned long v;
54
55 va_start(args, format);
56 for (p = format; *p != 0; p = q) {
57 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
58 ;
59 if (q > p)
60 btext_drawtext(p, q - p);
61 if (*q == 0)
62 break;
63 if (*q == '\n') {
64 ++q;
65 btext_flushline();
66 btext_drawstring("\r\n");
67 btext_flushline();
68 continue;
69 }
70 ++q;
71 if (*q == 0)
72 break;
73 switch (*q) {
74 case 's':
75 ++q;
76 s = va_arg(args, const char *);
77 if (s == NULL)
78 s = "<NULL>";
79 btext_drawstring(s);
80 break;
81 case 'x':
82 ++q;
83 v = va_arg(args, unsigned long);
84 btext_drawhex(v);
85 break;
86 }
87 }
88}
89#else /* CONFIG_BOOTX_TEXT */
90static void __init bootx_printf(const char *format, ...) {}
91#endif /* CONFIG_BOOTX_TEXT */
92
93static void * __init bootx_early_getprop(unsigned long base,
94 unsigned long node,
95 char *prop)
96{
97 struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
98 u32 *ppp = &np->properties;
99
100 while(*ppp) {
101 struct bootx_dt_prop *pp =
102 (struct bootx_dt_prop *)(base + *ppp);
103
104 if (strcmp((char *)((unsigned long)pp->name + base),
105 prop) == 0) {
106 return (void *)((unsigned long)pp->value + base);
107 }
108 ppp = &pp->next;
109 }
110 return NULL;
111}
112
113#define dt_push_token(token, mem) \
114 do { \
115 *(mem) = _ALIGN_UP(*(mem),4); \
116 *((u32 *)*(mem)) = token; \
117 *(mem) += 4; \
118 } while(0)
119
120static unsigned long __init bootx_dt_find_string(char *str)
121{
122 char *s, *os;
123
124 s = os = (char *)bootx_dt_strbase;
125 s += 4;
126 while (s < (char *)bootx_dt_strend) {
127 if (strcmp(s, str) == 0)
128 return s - os;
129 s += strlen(s) + 1;
130 }
131 return 0;
132}
133
134static void __init bootx_dt_add_prop(char *name, void *data, int size,
135 unsigned long *mem_end)
136{
137 unsigned long soff = bootx_dt_find_string(name);
138 if (data == NULL)
139 size = 0;
140 if (soff == 0) {
141 bootx_printf("WARNING: Can't find string index for <%s>\n",
142 name);
143 return;
144 }
145 if (size > 0x20000) {
146 bootx_printf("WARNING: ignoring large property ");
147 bootx_printf("%s length 0x%x\n", name, size);
148 return;
149 }
150 dt_push_token(OF_DT_PROP, mem_end);
151 dt_push_token(size, mem_end);
152 dt_push_token(soff, mem_end);
153
154 /* push property content */
155 if (size && data) {
156 memcpy((void *)*mem_end, data, size);
157 *mem_end = _ALIGN_UP(*mem_end + size, 4);
158 }
159}
160
161static void __init bootx_add_chosen_props(unsigned long base,
162 unsigned long *mem_end)
163{
164 u32 val = _MACH_Pmac;
165
166 bootx_dt_add_prop("linux,platform", &val, 4, mem_end);
167
168 if (bootx_info->kernelParamsOffset) {
169 char *args = (char *)((unsigned long)bootx_info) +
170 bootx_info->kernelParamsOffset;
171 bootx_dt_add_prop("bootargs", args, strlen(args) + 1, mem_end);
172 }
173 if (bootx_info->ramDisk) {
174 val = ((unsigned long)bootx_info) + bootx_info->ramDisk;
175 bootx_dt_add_prop("linux,initrd-start", &val, 4, mem_end);
176 val += bootx_info->ramDiskSize;
177 bootx_dt_add_prop("linux,initrd-end", &val, 4, mem_end);
178 }
179 if (strlen(bootx_disp_path))
180 bootx_dt_add_prop("linux,stdout-path", bootx_disp_path,
181 strlen(bootx_disp_path) + 1, mem_end);
182}
183
184static void __init bootx_add_display_props(unsigned long base,
185 unsigned long *mem_end)
186{
187 bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end);
188 bootx_dt_add_prop("linux,opened", NULL, 0, mem_end);
189}
190
191static void __init bootx_dt_add_string(char *s, unsigned long *mem_end)
192{
193 unsigned int l = strlen(s) + 1;
194 memcpy((void *)*mem_end, s, l);
195 bootx_dt_strend = *mem_end = *mem_end + l;
196}
197
198static void __init bootx_scan_dt_build_strings(unsigned long base,
199 unsigned long node,
200 unsigned long *mem_end)
201{
202 struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
203 u32 *cpp, *ppp = &np->properties;
204 unsigned long soff;
205 char *namep;
206
207 /* Keep refs to known nodes */
208 namep = np->full_name ? (char *)(base + np->full_name) : NULL;
209 if (namep == NULL) {
210 bootx_printf("Node without a full name !\n");
211 namep = "";
212 }
213 DBG("* strings: %s\n", namep);
214
215 if (!strcmp(namep, "/chosen")) {
216 DBG(" detected /chosen ! adding properties names !\n");
217 bootx_dt_add_string("linux,platform", mem_end);
218 bootx_dt_add_string("linux,stdout-path", mem_end);
219 bootx_dt_add_string("linux,initrd-start", mem_end);
220 bootx_dt_add_string("linux,initrd-end", mem_end);
221 bootx_dt_add_string("bootargs", mem_end);
222 bootx_node_chosen = node;
223 }
224 if (node == bootx_info->dispDeviceRegEntryOffset) {
225 DBG(" detected display ! adding properties names !\n");
226 bootx_dt_add_string("linux,boot-display", mem_end);
227 bootx_dt_add_string("linux,opened", mem_end);
228 strncpy(bootx_disp_path, namep, 255);
229 }
230
231 /* get and store all property names */
232 while (*ppp) {
233 struct bootx_dt_prop *pp =
234 (struct bootx_dt_prop *)(base + *ppp);
235
236 namep = pp->name ? (char *)(base + pp->name) : NULL;
237 if (namep == NULL || strcmp(namep, "name") == 0)
238 goto next;
239 /* get/create string entry */
240 soff = bootx_dt_find_string(namep);
241 if (soff == 0)
242 bootx_dt_add_string(namep, mem_end);
243 next:
244 ppp = &pp->next;
245 }
246
247 /* do all our children */
248 cpp = &np->child;
249 while(*cpp) {
250 np = (struct bootx_dt_node *)(base + *cpp);
251 bootx_scan_dt_build_strings(base, *cpp, mem_end);
252 cpp = &np->sibling;
253 }
254}
255
256static void __init bootx_scan_dt_build_struct(unsigned long base,
257 unsigned long node,
258 unsigned long *mem_end)
259{
260 struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
261 u32 *cpp, *ppp = &np->properties;
262 char *namep, *p, *ep, *lp;
263 int l;
264
265 dt_push_token(OF_DT_BEGIN_NODE, mem_end);
266
267 /* get the node's full name */
268 namep = np->full_name ? (char *)(base + np->full_name) : NULL;
269 if (namep == NULL)
270 namep = "";
271 l = strlen(namep);
272
273 DBG("* struct: %s\n", namep);
274
275 /* Fixup an Apple bug where they have bogus \0 chars in the
276 * middle of the path in some properties, and extract
277 * the unit name (everything after the last '/').
278 */
279 memcpy((void *)*mem_end, namep, l + 1);
280 namep = (char *)*mem_end;
281 for (lp = p = namep, ep = namep + l; p < ep; p++) {
282 if (*p == '/')
283 lp = namep;
284 else if (*p != 0)
285 *lp++ = *p;
286 }
287 *lp = 0;
288 *mem_end = _ALIGN_UP((unsigned long)lp + 1, 4);
289
290 /* get and store all properties */
291 while (*ppp) {
292 struct bootx_dt_prop *pp =
293 (struct bootx_dt_prop *)(base + *ppp);
294
295 namep = pp->name ? (char *)(base + pp->name) : NULL;
296 /* Skip "name" */
297 if (namep == NULL || !strcmp(namep, "name"))
298 goto next;
299 /* Skip "bootargs" in /chosen too as we replace it */
300 if (node == bootx_node_chosen && !strcmp(namep, "bootargs"))
301 goto next;
302
303 /* push property head */
304 bootx_dt_add_prop(namep,
305 pp->value ? (void *)(base + pp->value): NULL,
306 pp->length, mem_end);
307 next:
308 ppp = &pp->next;
309 }
310
311 if (node == bootx_node_chosen)
312 bootx_add_chosen_props(base, mem_end);
313 if (node == bootx_info->dispDeviceRegEntryOffset)
314 bootx_add_display_props(base, mem_end);
315
316 /* do all our children */
317 cpp = &np->child;
318 while(*cpp) {
319 np = (struct bootx_dt_node *)(base + *cpp);
320 bootx_scan_dt_build_struct(base, *cpp, mem_end);
321 cpp = &np->sibling;
322 }
323
324 dt_push_token(OF_DT_END_NODE, mem_end);
325}
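
The cleanup loop above is worth a worked example: it drops the bogus embedded '\0' bytes and, because lp is reset at every '/', keeps only the final path component. So "/ht@0,f2000000/pci@1" collapses to "pci@1". The same loop in hypothetical stand-alone form:

/* Sketch: compact out stray NULs and keep the last path component */
static void extract_unit_name(char *namep, int l)
{
	char *p, *lp, *ep;

	for (lp = p = namep, ep = namep + l; p < ep; p++) {
		if (*p == '/')
			lp = namep;	/* restart at each separator */
		else if (*p != 0)
			*lp++ = *p;	/* skip embedded '\0' bytes */
	}
	*lp = 0;
}
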
326
327static unsigned long __init bootx_flatten_dt(unsigned long start)
328{
329 boot_infos_t *bi = bootx_info;
330 unsigned long mem_start, mem_end;
331 struct boot_param_header *hdr;
332 unsigned long base;
333 u64 *rsvmap;
334
335 /* Start using memory after the big blob passed by BootX, get
336 * some space for the header
337 */
338 mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4);
339 DBG("Boot params header at: %x\n", mem_start);
340 hdr = (struct boot_param_header *)mem_start;
341 mem_end += sizeof(struct boot_param_header);
342 rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8));
343 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start;
344 mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64);
345
346 /* Get base of tree */
347 base = ((unsigned long)bi) + bi->deviceTreeOffset;
348
349 /* Build string array */
350 DBG("Building string array at: %x\n", mem_end);
351 DBG("Device Tree Base=%x\n", base);
352 bootx_dt_strbase = mem_end;
353 mem_end += 4;
354 bootx_dt_strend = mem_end;
355 bootx_scan_dt_build_strings(base, 4, &mem_end);
356 hdr->off_dt_strings = bootx_dt_strbase - mem_start;
357 hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase;
358
359 /* Build structure */
360 mem_end = _ALIGN(mem_end, 16);
361 DBG("Building device tree structure at: %x\n", mem_end);
362 hdr->off_dt_struct = mem_end - mem_start;
363 bootx_scan_dt_build_struct(base, 4, &mem_end);
364 dt_push_token(OF_DT_END, &mem_end);
365
366 /* Finish header */
367 hdr->boot_cpuid_phys = 0;
368 hdr->magic = OF_DT_HEADER;
369 hdr->totalsize = mem_end - mem_start;
370 hdr->version = OF_DT_VERSION;
371 /* Version 16 is not backward compatible */
372 hdr->last_comp_version = 0x10;
373
374 /* Reserve the whole thing and copy the reserve map in, we
375 * also bump mem_reserve_cnt to cause further reservations to
376 * fail since it's too late.
377 */
378 mem_end = _ALIGN(mem_end, PAGE_SIZE);
379 DBG("End of boot params: %x\n", mem_end);
380 rsvmap[0] = mem_start;
381 rsvmap[1] = mem_end;
382 rsvmap[2] = 0;
383 rsvmap[3] = 0;
384
385 return (unsigned long)hdr;
386}
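
To summarize what bootx_flatten_dt() produces: a boot_param_header, an 8-entry reserve map whose first pair covers the blob itself and whose trailing zeros terminate it, then the string table, then the structure block, all located by header offsets relative to mem_start. A hedged sketch of how a consumer might sanity-check the result, assuming exactly the layout written above:

/* Sketch, assuming the layout written above: validate the blob */
static int check_flat_tree(struct boot_param_header *hdr)
{
	u64 *rsv = (u64 *)((char *)hdr + hdr->off_mem_rsvmap);

	if (hdr->magic != OF_DT_HEADER)
		return -1;	/* not a flattened tree */
	if (hdr->last_comp_version > OF_DT_VERSION)
		return -1;	/* blob newer than this consumer */
	/* first entries cover the blob itself; zeros terminate */
	return (rsv[0] != 0 && rsv[2] == 0) ? 0 : -1;
}
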
387
388
389#ifdef CONFIG_BOOTX_TEXT
390static void __init btext_welcome(boot_infos_t *bi)
391{
392 unsigned long flags;
393 unsigned long pvr;
394
395 bootx_printf("Welcome to Linux, kernel " UTS_RELEASE "\n");
396 bootx_printf("\nlinked at : 0x%x", KERNELBASE);
397 bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase);
398 bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase);
399 bootx_printf(" (log)");
400	bootx_printf("\nklimit : 0x%x", (unsigned long)klimit);
401 bootx_printf("\nboot_info at : 0x%x", bi);
402 __asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
403 bootx_printf("\nMSR : 0x%x", flags);
404 __asm__ __volatile__ ("mfspr %0, 287" : "=r" (pvr));
405 bootx_printf("\nPVR : 0x%x", pvr);
406 pvr >>= 16;
407 if (pvr > 1) {
408 __asm__ __volatile__ ("mfspr %0, 1008" : "=r" (flags));
409 bootx_printf("\nHID0 : 0x%x", flags);
410 }
411 if (pvr == 8 || pvr == 12 || pvr == 0x800c) {
412 __asm__ __volatile__ ("mfspr %0, 1019" : "=r" (flags));
413 bootx_printf("\nICTC : 0x%x", flags);
414 }
415#ifdef DEBUG
416 bootx_printf("\n\n");
417 bootx_printf("bi->deviceTreeOffset : 0x%x\n",
418 bi->deviceTreeOffset);
419 bootx_printf("bi->deviceTreeSize : 0x%x\n",
420 bi->deviceTreeSize);
421#endif
422 bootx_printf("\n\n");
423}
424#endif /* CONFIG_BOOTX_TEXT */
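
The inline assembly in btext_welcome() is the standard mfspr idiom: read a special-purpose register by number into a GPR. For instance, SPR 287 is the processor version register (PVR), whose top half identifies the CPU family tested above:

/* Sketch: read the PVR (SPR 287) via mfspr */
static inline unsigned long read_pvr(void)
{
	unsigned long pvr;

	__asm__ __volatile__("mfspr %0, 287" : "=r" (pvr));
	return pvr;		/* family in the top 16 bits */
}
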
425
426void __init bootx_init(unsigned long r3, unsigned long r4)
427{
428 boot_infos_t *bi = (boot_infos_t *) r4;
429 unsigned long hdr;
430 unsigned long space;
431 unsigned long ptr, x;
432 char *model;
433 unsigned long offset = reloc_offset();
434
435 reloc_got2(offset);
436
437 bootx_info = bi;
438
439 /* We haven't cleared any bss at this point, make sure
440 * what we need is initialized
441 */
442 bootx_dt_strbase = bootx_dt_strend = 0;
443 bootx_node_chosen = 0;
444 bootx_disp_path[0] = 0;
445
446 if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
447 bi->logicalDisplayBase = bi->dispDeviceBase;
448
449#ifdef CONFIG_BOOTX_TEXT
450 btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0],
451 bi->dispDeviceRect[3] - bi->dispDeviceRect[1],
452 bi->dispDeviceDepth, bi->dispDeviceRowBytes,
453 (unsigned long)bi->logicalDisplayBase);
454 btext_clearscreen();
455 btext_flushscreen();
456#endif /* CONFIG_BOOTX_TEXT */
457
458 /*
459 * Test if boot-info is compatible. Done only in config
460 * CONFIG_BOOTX_TEXT since there is nothing much we can do
461 * with an incompatible version, except display a message
462 * and eventually hang the processor...
463 *
464 * I'll try to keep enough of boot-info compatible in the
465 * future to always allow display of this message;
466 * future to always allow display of this message.
467 if (!BOOT_INFO_IS_COMPATIBLE(bi)) {
468 bootx_printf(" !!! WARNING - Incompatible version"
469 " of BootX !!!\n\n\n");
470 for (;;)
471 ;
472 }
473 if (bi->architecture != BOOT_ARCH_PCI) {
474		bootx_printf(" !!! WARNING - Unsupported machine"
475 " architecture !\n");
476 for (;;)
477 ;
478 }
479
480#ifdef CONFIG_BOOTX_TEXT
481 btext_welcome(bi);
482#endif
483 /* New BootX enters kernel with MMU off, i/os are not allowed
484 * here. This hack will have been done by the bootstrap anyway.
485 */
486 if (bi->version < 4) {
487 /*
488 * XXX If this is an iMac, turn off the USB controller.
489 */
490 model = (char *) bootx_early_getprop(r4 + bi->deviceTreeOffset,
491 4, "model");
492 if (model
493 && (strcmp(model, "iMac,1") == 0
494 || strcmp(model, "PowerMac1,1") == 0)) {
495			bootx_printf("iMac,1 detected, shutting down USB\n");
496 out_le32((unsigned *)0x80880008, 1); /* XXX */
497 }
498 }
499
500 /* Get a pointer that points above the device tree, args, ramdisk,
501 * etc... to use for generating the flattened tree
502 */
503 if (bi->version < 5) {
504 space = bi->deviceTreeOffset + bi->deviceTreeSize;
505 if (bi->ramDisk)
506 space = bi->ramDisk + bi->ramDiskSize;
507 } else
508 space = bi->totalParamsSize;
509
510	bootx_printf("Total space used by parameters & ramdisk: %x\n", space);
511
512 /* New BootX will have flushed all TLBs and enters kernel with
513 * MMU switched OFF, so this should not be useful anymore.
514 */
515 if (bi->version < 4) {
516 bootx_printf("Touching pages...\n");
517
518 /*
519 * Touch each page to make sure the PTEs for them
520 * are in the hash table - the aim is to try to avoid
521 * getting DSI exceptions while copying the kernel image.
522 */
523 for (ptr = ((unsigned long) &_stext) & PAGE_MASK;
524 ptr < (unsigned long)bi + space; ptr += PAGE_SIZE)
525 x = *(volatile unsigned long *)ptr;
526 }
527
528 /* Ok, now we need to generate a flattened device-tree to pass
529 * to the kernel
530 */
531 bootx_printf("Preparing boot params...\n");
532
533 hdr = bootx_flatten_dt(space);
534
535#ifdef CONFIG_BOOTX_TEXT
536#ifdef SET_BOOT_BAT
537 bootx_printf("Preparing BAT...\n");
538 btext_prepare_BAT();
539#else
540 btext_unmap();
541#endif
542#endif
543
544 reloc_got2(-offset);
545
546 __start(hdr, KERNELBASE + offset, 0);
547}
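
One step above deserves a closer look: for old BootX versions, the pre-touch loop reads one word per page so the hash table gains PTEs before the kernel image is copied, avoiding DSI exceptions mid-copy. In isolated form (assuming the usual PAGE_MASK/PAGE_SIZE definitions):

/* Sketch: fault in PTEs by touching one word per page */
static void __init touch_range(unsigned long start, unsigned long end)
{
	unsigned long ptr;
	volatile unsigned long x;

	for (ptr = start & PAGE_MASK; ptr < end; ptr += PAGE_SIZE)
		x = *(volatile unsigned long *)ptr;
	(void)x;		/* value is irrelevant, the access isn't */
}
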
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 39150342c6f..a4b50c4109c 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -28,6 +28,7 @@
28#include <asm/cputable.h> 28#include <asm/cputable.h>
29#include <asm/time.h> 29#include <asm/time.h>
30#include <asm/smu.h> 30#include <asm/smu.h>
31#include <asm/pmac_pfunc.h>
31 32
32#undef DEBUG 33#undef DEBUG
33 34
@@ -85,6 +86,10 @@ static u32 *g5_pmode_data;
85static int g5_pmode_max; 86static int g5_pmode_max;
86static int g5_pmode_cur; 87static int g5_pmode_cur;
87 88
89static void (*g5_switch_volt)(int speed_mode);
90static int (*g5_switch_freq)(int speed_mode);
91static int (*g5_query_freq)(void);
92
88static DECLARE_MUTEX(g5_switch_mutex); 93static DECLARE_MUTEX(g5_switch_mutex);
89 94
90 95
@@ -92,9 +97,11 @@ static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
92static int g5_fvt_count; /* number of op. points */ 97static int g5_fvt_count; /* number of op. points */
93static int g5_fvt_cur; /* current op. point */ 98static int g5_fvt_cur; /* current op. point */
94 99
95/* ----------------- real hardware interface */ 100/*
101 * SMU based voltage switching for Neo2 platforms
102 */
96 103
97static void g5_switch_volt(int speed_mode) 104static void g5_smu_switch_volt(int speed_mode)
98{ 105{
99 struct smu_simple_cmd cmd; 106 struct smu_simple_cmd cmd;
100 107
@@ -105,26 +112,57 @@ static void g5_switch_volt(int speed_mode)
105 wait_for_completion(&comp); 112 wait_for_completion(&comp);
106} 113}
107 114
108static int g5_switch_freq(int speed_mode) 115/*
116 * Platform function based voltage/vdnap switching for Neo2
117 */
118
119static struct pmf_function *pfunc_set_vdnap0;
120static struct pmf_function *pfunc_vdnap0_complete;
121
122static void g5_vdnap_switch_volt(int speed_mode)
109{ 123{
110 struct cpufreq_freqs freqs; 124 struct pmf_args args;
111 int to; 125 u32 slew, done = 0;
126 unsigned long timeout;
112 127
113 if (g5_pmode_cur == speed_mode) 128 slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0;
114 return 0; 129 args.count = 1;
130 args.u[0].p = &slew;
115 131
116 down(&g5_switch_mutex); 132 pmf_call_one(pfunc_set_vdnap0, &args);
117 133
118 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; 134 /* It's an irq GPIO so we should be able to just block here,
119 freqs.new = g5_cpu_freqs[speed_mode].frequency; 135 * I'll do that later after I've properly tested the IRQ code for
120 freqs.cpu = 0; 136 * platform functions
137 */
138 timeout = jiffies + HZ/10;
139 while(!time_after(jiffies, timeout)) {
140 args.count = 1;
141 args.u[0].p = &done;
142 pmf_call_one(pfunc_vdnap0_complete, &args);
143 if (done)
144 break;
145 msleep(1);
146 }
147 if (done == 0)
148 printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
149}
121 150
122 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 151
152/*
153 * SCOM based frequency switching for 970FX rev3
154 */
155static int g5_scom_switch_freq(int speed_mode)
156{
157 unsigned long flags;
158 int to;
123 159
124 /* If frequency is going up, first ramp up the voltage */ 160 /* If frequency is going up, first ramp up the voltage */
125 if (speed_mode < g5_pmode_cur) 161 if (speed_mode < g5_pmode_cur)
126 g5_switch_volt(speed_mode); 162 g5_switch_volt(speed_mode);
127 163
164 local_irq_save(flags);
165
128 /* Clear PCR high */ 166 /* Clear PCR high */
129 scom970_write(SCOM_PCR, 0); 167 scom970_write(SCOM_PCR, 0);
130 /* Clear PCR low */ 168 /* Clear PCR low */
@@ -147,6 +185,8 @@ static int g5_switch_freq(int speed_mode)
147 udelay(100); 185 udelay(100);
148 } 186 }
149 187
188 local_irq_restore(flags);
189
150 /* If frequency is going down, last ramp the voltage */ 190 /* If frequency is going down, last ramp the voltage */
151 if (speed_mode > g5_pmode_cur) 191 if (speed_mode > g5_pmode_cur)
152 g5_switch_volt(speed_mode); 192 g5_switch_volt(speed_mode);
@@ -154,14 +194,10 @@ static int g5_switch_freq(int speed_mode)
154 g5_pmode_cur = speed_mode; 194 g5_pmode_cur = speed_mode;
155 ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; 195 ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
156 196
157 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
158
159 up(&g5_switch_mutex);
160
161 return 0; 197 return 0;
162} 198}
163 199
164static int g5_query_freq(void) 200static int g5_scom_query_freq(void)
165{ 201{
166 unsigned long psr = scom970_read(SCOM_PSR); 202 unsigned long psr = scom970_read(SCOM_PSR);
167 int i; 203 int i;
@@ -173,7 +209,104 @@ static int g5_query_freq(void)
173 return i; 209 return i;
174} 210}
175 211
176/* ----------------- cpufreq bookkeeping */ 212/*
213 * Platform function based voltage switching for PowerMac7,2 & 7,3
214 */
215
216static struct pmf_function *pfunc_cpu0_volt_high;
217static struct pmf_function *pfunc_cpu0_volt_low;
218static struct pmf_function *pfunc_cpu1_volt_high;
219static struct pmf_function *pfunc_cpu1_volt_low;
220
221static void g5_pfunc_switch_volt(int speed_mode)
222{
223 if (speed_mode == CPUFREQ_HIGH) {
224 if (pfunc_cpu0_volt_high)
225 pmf_call_one(pfunc_cpu0_volt_high, NULL);
226 if (pfunc_cpu1_volt_high)
227 pmf_call_one(pfunc_cpu1_volt_high, NULL);
228 } else {
229 if (pfunc_cpu0_volt_low)
230 pmf_call_one(pfunc_cpu0_volt_low, NULL);
231 if (pfunc_cpu1_volt_low)
232 pmf_call_one(pfunc_cpu1_volt_low, NULL);
233 }
234	msleep(10); /* should be faster, to fix */
235}
236
237/*
238 * Platform function based frequency switching for PowerMac7,2 & 7,3
239 */
240
241static struct pmf_function *pfunc_cpu_setfreq_high;
242static struct pmf_function *pfunc_cpu_setfreq_low;
243static struct pmf_function *pfunc_cpu_getfreq;
244static struct pmf_function *pfunc_slewing_done;
245
246static int g5_pfunc_switch_freq(int speed_mode)
247{
248 struct pmf_args args;
249 u32 done = 0;
250 unsigned long timeout;
251
252 /* If frequency is going up, first ramp up the voltage */
253 if (speed_mode < g5_pmode_cur)
254 g5_switch_volt(speed_mode);
255
256 /* Do it */
257 if (speed_mode == CPUFREQ_HIGH)
258 pmf_call_one(pfunc_cpu_setfreq_high, NULL);
259 else
260 pmf_call_one(pfunc_cpu_setfreq_low, NULL);
261
262 /* It's an irq GPIO so we should be able to just block here,
263 * I'll do that later after I've properly tested the IRQ code for
264 * platform functions
265 */
266 timeout = jiffies + HZ/10;
267 while(!time_after(jiffies, timeout)) {
268 args.count = 1;
269 args.u[0].p = &done;
270 pmf_call_one(pfunc_slewing_done, &args);
271 if (done)
272 break;
273 msleep(1);
274 }
275 if (done == 0)
276 printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
277
278 /* If frequency is going down, last ramp the voltage */
279 if (speed_mode > g5_pmode_cur)
280 g5_switch_volt(speed_mode);
281
282 g5_pmode_cur = speed_mode;
283 ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
284
285 return 0;
286}
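
Both g5_vdnap_switch_volt() and g5_pfunc_switch_freq() use the same bounded-poll idiom while waiting for the slew-done GPIO: up to 100ms (HZ/10 in jiffies), sleeping 1ms between reads. A generic sketch, where read_done is a hypothetical stand-in for the pmf_call_one() query:

/* Sketch: poll a completion flag for up to 100ms */
static int poll_done(u32 (*read_done)(void))
{
	unsigned long timeout = jiffies + HZ/10;

	while (!time_after(jiffies, timeout)) {
		if (read_done())
			return 1;	/* slew complete */
		msleep(1);
	}
	return 0;			/* timed out */
}
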
287
288static int g5_pfunc_query_freq(void)
289{
290 struct pmf_args args;
291 u32 val = 0;
292
293 args.count = 1;
294 args.u[0].p = &val;
295 pmf_call_one(pfunc_cpu_getfreq, &args);
296 return val ? CPUFREQ_HIGH : CPUFREQ_LOW;
297}
298
299/*
300 * Fake voltage switching for platforms with missing support
301 */
302
303static void g5_dummy_switch_volt(int speed_mode)
304{
305}
306
307/*
308 * Common interface to the cpufreq core
309 */
177 310
178static int g5_cpufreq_verify(struct cpufreq_policy *policy) 311static int g5_cpufreq_verify(struct cpufreq_policy *policy)
179{ 312{
@@ -183,13 +316,30 @@ static int g5_cpufreq_verify(struct cpufreq_policy *policy)
183static int g5_cpufreq_target(struct cpufreq_policy *policy, 316static int g5_cpufreq_target(struct cpufreq_policy *policy,
184 unsigned int target_freq, unsigned int relation) 317 unsigned int target_freq, unsigned int relation)
185{ 318{
186 unsigned int newstate = 0; 319 unsigned int newstate = 0;
320 struct cpufreq_freqs freqs;
321 int rc;
187 322
188 if (cpufreq_frequency_table_target(policy, g5_cpu_freqs, 323 if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
189 target_freq, relation, &newstate)) 324 target_freq, relation, &newstate))
190 return -EINVAL; 325 return -EINVAL;
191 326
192 return g5_switch_freq(newstate); 327 if (g5_pmode_cur == newstate)
328 return 0;
329
330 down(&g5_switch_mutex);
331
332 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
333 freqs.new = g5_cpu_freqs[newstate].frequency;
334 freqs.cpu = 0;
335
336 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
337 rc = g5_switch_freq(newstate);
338 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
339
340 up(&g5_switch_mutex);
341
342 return rc;
193} 343}
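
The hunk above moves the cpufreq transition notifications out of the hardware switch and into the ->target hook, so every backend gets them for free. The resulting bracket, sketched in isolation:

/* Sketch: publish old/new frequencies around the hardware switch */
static int notify_and_switch(struct cpufreq_freqs *freqs,
			     int (*hw_switch)(int mode), int mode)
{
	int rc;

	cpufreq_notify_transition(freqs, CPUFREQ_PRECHANGE);
	rc = hw_switch(mode);		/* e.g. g5_scom_switch_freq */
	cpufreq_notify_transition(freqs, CPUFREQ_POSTCHANGE);
	return rc;
}
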
194 344
195static unsigned int g5_cpufreq_get_speed(unsigned int cpu) 345static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
@@ -205,6 +355,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
205 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 355 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
206 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 356 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
207 policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; 357 policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
358 policy->cpus = cpu_possible_map;
208 cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); 359 cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
209 360
210 return cpufreq_frequency_table_cpuinfo(policy, 361 return cpufreq_frequency_table_cpuinfo(policy,
@@ -224,19 +375,39 @@ static struct cpufreq_driver g5_cpufreq_driver = {
224}; 375};
225 376
226 377
227static int __init g5_cpufreq_init(void) 378static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
228{ 379{
229 struct device_node *cpunode; 380 struct device_node *cpunode;
230 unsigned int psize, ssize; 381 unsigned int psize, ssize;
231 struct smu_sdbp_header *shdr;
232 unsigned long max_freq; 382 unsigned long max_freq;
233 u32 *valp; 383 char *freq_method, *volt_method;
384 u32 *valp, pvr_hi;
385 int use_volts_vdnap = 0;
386 int use_volts_smu = 0;
234 int rc = -ENODEV; 387 int rc = -ENODEV;
235 388
236 /* Look for CPU and SMU nodes */ 389 /* Check supported platforms */
237 cpunode = of_find_node_by_type(NULL, "cpu"); 390 if (machine_is_compatible("PowerMac8,1") ||
238 if (!cpunode) { 391 machine_is_compatible("PowerMac8,2") ||
239 DBG("No CPU node !\n"); 392 machine_is_compatible("PowerMac9,1"))
393 use_volts_smu = 1;
394 else if (machine_is_compatible("PowerMac11,2"))
395 use_volts_vdnap = 1;
396 else
397 return -ENODEV;
398
399 /* Get first CPU node */
400 for (cpunode = NULL;
401 (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
402 u32 *reg =
403 (u32 *)get_property(cpunode, "reg", NULL);
404 if (reg == NULL || (*reg) != 0)
405 continue;
406 if (!strcmp(cpunode->type, "cpu"))
407 break;
408 }
409 if (cpunode == NULL) {
410 printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
240 return -ENODEV; 411 return -ENODEV;
241 } 412 }
242 413
@@ -246,8 +417,9 @@ static int __init g5_cpufreq_init(void)
246 DBG("No cpu-version property !\n"); 417 DBG("No cpu-version property !\n");
247 goto bail_noprops; 418 goto bail_noprops;
248 } 419 }
249 if (((*valp) >> 16) != 0x3c) { 420 pvr_hi = (*valp) >> 16;
250 DBG("Wrong CPU version: %08x\n", *valp); 421 if (pvr_hi != 0x3c && pvr_hi != 0x44) {
422 printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
251 goto bail_noprops; 423 goto bail_noprops;
252 } 424 }
253 425
@@ -259,18 +431,50 @@ static int __init g5_cpufreq_init(void)
259 } 431 }
260 g5_pmode_max = psize / sizeof(u32) - 1; 432 g5_pmode_max = psize / sizeof(u32) - 1;
261 433
262 /* Look for the FVT table */ 434 if (use_volts_smu) {
263 shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); 435 struct smu_sdbp_header *shdr;
264 if (!shdr) 436
265 goto bail_noprops; 437 /* Look for the FVT table */
266 g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; 438 shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
267 ssize = (shdr->len * sizeof(u32)) - sizeof(struct smu_sdbp_header); 439 if (!shdr)
268 g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt); 440 goto bail_noprops;
269 g5_fvt_cur = 0; 441 g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
270 442 ssize = (shdr->len * sizeof(u32)) -
271 /* Sanity checking */ 443 sizeof(struct smu_sdbp_header);
272 if (g5_fvt_count < 1 || g5_pmode_max < 1) 444 g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
273 goto bail_noprops; 445 g5_fvt_cur = 0;
446
447 /* Sanity checking */
448 if (g5_fvt_count < 1 || g5_pmode_max < 1)
449 goto bail_noprops;
450
451 g5_switch_volt = g5_smu_switch_volt;
452 volt_method = "SMU";
453 } else if (use_volts_vdnap) {
454 struct device_node *root;
455
456 root = of_find_node_by_path("/");
457 if (root == NULL) {
458 printk(KERN_ERR "cpufreq: Can't find root of "
459 "device tree\n");
460 goto bail_noprops;
461 }
462 pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
463 pfunc_vdnap0_complete =
464 pmf_find_function(root, "slewing-done");
465 if (pfunc_set_vdnap0 == NULL ||
466 pfunc_vdnap0_complete == NULL) {
467 printk(KERN_ERR "cpufreq: Can't find required "
468 "platform function\n");
469 goto bail_noprops;
470 }
471
472 g5_switch_volt = g5_vdnap_switch_volt;
473 volt_method = "GPIO";
474 } else {
475 g5_switch_volt = g5_dummy_switch_volt;
476 volt_method = "none";
477 }
274 478
275 /* 479 /*
276 * From what I see, clock-frequency is always the maximal frequency. 480 * From what I see, clock-frequency is always the maximal frequency.
@@ -286,19 +490,23 @@ static int __init g5_cpufreq_init(void)
286 g5_cpu_freqs[0].frequency = max_freq; 490 g5_cpu_freqs[0].frequency = max_freq;
287 g5_cpu_freqs[1].frequency = max_freq/2; 491 g5_cpu_freqs[1].frequency = max_freq/2;
288 492
289 /* Check current frequency */ 493 /* Set callbacks */
290 g5_pmode_cur = g5_query_freq(); 494 g5_switch_freq = g5_scom_switch_freq;
291 if (g5_pmode_cur > 1) 495 g5_query_freq = g5_scom_query_freq;
292 /* We don't support anything but 1:1 and 1:2, fixup ... */ 496 freq_method = "SCOM";
293 g5_pmode_cur = 1;
294 497
295 /* Force apply current frequency to make sure everything is in 498 /* Force apply current frequency to make sure everything is in
296 * sync (voltage is right for example). Firmware may leave us with 499 * sync (voltage is right for example). Firmware may leave us with
297 * a strange setting ... 500 * a strange setting ...
298 */ 501 */
299 g5_switch_freq(g5_pmode_cur); 502 g5_switch_volt(CPUFREQ_HIGH);
503 msleep(10);
504 g5_pmode_cur = -1;
505 g5_switch_freq(g5_query_freq());
300 506
301 printk(KERN_INFO "Registering G5 CPU frequency driver\n"); 507 printk(KERN_INFO "Registering G5 CPU frequency driver\n");
508 printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
509 freq_method, volt_method);
302	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n", 510	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
303 g5_cpu_freqs[1].frequency/1000, 511 g5_cpu_freqs[1].frequency/1000,
304 g5_cpu_freqs[0].frequency/1000, 512 g5_cpu_freqs[0].frequency/1000,
@@ -317,6 +525,200 @@ static int __init g5_cpufreq_init(void)
317 return rc; 525 return rc;
318} 526}
319 527
528static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
529{
530 struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL;
531 u8 *eeprom = NULL;
532 u32 *valp;
533 u64 max_freq, min_freq, ih, il;
534 int has_volt = 1, rc = 0;
535
536 /* Get first CPU node */
537 for (cpunode = NULL;
538 (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
539 if (!strcmp(cpunode->type, "cpu"))
540 break;
541 }
542 if (cpunode == NULL) {
543 printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
544 return -ENODEV;
545 }
546
547 /* Lookup the cpuid eeprom node */
548 cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
549 if (cpuid != NULL)
550 eeprom = (u8 *)get_property(cpuid, "cpuid", NULL);
551 if (eeprom == NULL) {
552 printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
553 rc = -ENODEV;
554 goto bail;
555 }
556
557 /* Lookup the i2c hwclock */
558 for (hwclock = NULL;
559 (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){
560 char *loc = get_property(hwclock, "hwctrl-location", NULL);
561 if (loc == NULL)
562 continue;
563 if (strcmp(loc, "CPU CLOCK"))
564 continue;
565 if (!get_property(hwclock, "platform-get-frequency", NULL))
566 continue;
567 break;
568 }
569 if (hwclock == NULL) {
570 printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
571 rc = -ENODEV;
572 goto bail;
573 }
574
575 DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name);
576
577 /* Now get all the platform functions */
578 pfunc_cpu_getfreq =
579 pmf_find_function(hwclock, "get-frequency");
580 pfunc_cpu_setfreq_high =
581 pmf_find_function(hwclock, "set-frequency-high");
582 pfunc_cpu_setfreq_low =
583 pmf_find_function(hwclock, "set-frequency-low");
584 pfunc_slewing_done =
585 pmf_find_function(hwclock, "slewing-done");
586 pfunc_cpu0_volt_high =
587 pmf_find_function(hwclock, "set-voltage-high-0");
588 pfunc_cpu0_volt_low =
589 pmf_find_function(hwclock, "set-voltage-low-0");
590 pfunc_cpu1_volt_high =
591 pmf_find_function(hwclock, "set-voltage-high-1");
592 pfunc_cpu1_volt_low =
593 pmf_find_function(hwclock, "set-voltage-low-1");
594
595 /* Check we have minimum requirements */
596 if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
597 pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
598 printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
599 rc = -ENODEV;
600 goto bail;
601 }
602
603 /* Check that we have complete sets */
604 if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) {
605 pmf_put_function(pfunc_cpu0_volt_high);
606 pmf_put_function(pfunc_cpu0_volt_low);
607 pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL;
608 has_volt = 0;
609 }
610 if (!has_volt ||
611 pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) {
612 pmf_put_function(pfunc_cpu1_volt_high);
613 pmf_put_function(pfunc_cpu1_volt_low);
614 pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL;
615 }
616
617 /* Note: The device tree also contains a "platform-set-values"
618 * function for which I haven't quite figured out the usage. It
619 * might have to be called on init and/or wakeup, I'm not too sure
620 * but things seem to work fine without it so far ...
621 */
622
623 /* Get max frequency from device-tree */
624 valp = (u32 *)get_property(cpunode, "clock-frequency", NULL);
625 if (!valp) {
626 printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
627 rc = -ENODEV;
628 goto bail;
629 }
630
631 max_freq = (*valp)/1000;
632
633 /* Now calculate reduced frequency by using the cpuid input freq
634 * ratio. This requires 64 bits math unless we are willing to lose
635 * some precision
636 */
637 ih = *((u32 *)(eeprom + 0x10));
638 il = *((u32 *)(eeprom + 0x20));
639 min_freq = 0;
640 if (ih != 0 && il != 0)
641 min_freq = (max_freq * il) / ih;
642
643 /* Sanity check */
644 if (min_freq >= max_freq || min_freq < 1000) {
645 printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
646 rc = -ENODEV;
647 goto bail;
648 }
649 g5_cpu_freqs[0].frequency = max_freq;
650 g5_cpu_freqs[1].frequency = min_freq;
651
652 /* Set callbacks */
653 g5_switch_volt = g5_pfunc_switch_volt;
654 g5_switch_freq = g5_pfunc_switch_freq;
655 g5_query_freq = g5_pfunc_query_freq;
656
657 /* Force apply current frequency to make sure everything is in
658 * sync (voltage is right for example). Firmware may leave us with
659 * a strange setting ...
660 */
661 g5_switch_volt(CPUFREQ_HIGH);
662 msleep(10);
663 g5_pmode_cur = -1;
664 g5_switch_freq(g5_query_freq());
665
666 printk(KERN_INFO "Registering G5 CPU frequency driver\n");
667 printk(KERN_INFO "Frequency method: i2c/pfunc, "
668 "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
669	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
670 g5_cpu_freqs[1].frequency/1000,
671 g5_cpu_freqs[0].frequency/1000,
672 g5_cpu_freqs[g5_pmode_cur].frequency/1000);
673
674 rc = cpufreq_register_driver(&g5_cpufreq_driver);
675 bail:
676 if (rc != 0) {
677 pmf_put_function(pfunc_cpu_getfreq);
678 pmf_put_function(pfunc_cpu_setfreq_high);
679 pmf_put_function(pfunc_cpu_setfreq_low);
680 pmf_put_function(pfunc_slewing_done);
681 pmf_put_function(pfunc_cpu0_volt_high);
682 pmf_put_function(pfunc_cpu0_volt_low);
683 pmf_put_function(pfunc_cpu1_volt_high);
684 pmf_put_function(pfunc_cpu1_volt_low);
685 }
686 of_node_put(hwclock);
687 of_node_put(cpuid);
688 of_node_put(cpunode);
689
690 return rc;
691}
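
The low-frequency computation above hinges on the ih/il ratio read from the cpuid EEPROM; 64-bit math keeps precision when the product max_freq * il overflows 32 bits. In isolation (values hypothetical):

/* Sketch: derive the reduced frequency from the EEPROM ratio */
static u64 reduced_freq_khz(u64 max_khz, u32 ih, u32 il)
{
	if (ih == 0 || il == 0)
		return 0;		/* no valid ratio available */
	return (max_khz * il) / ih;	/* e.g. 2500000 * 1 / 2 */
}
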
692
693static int __init g5_rm31_cpufreq_init(struct device_node *cpus)
694{
695 /* NYI */
696 return 0;
697}
698
699static int __init g5_cpufreq_init(void)
700{
701 struct device_node *cpus;
702 int rc;
703
704 cpus = of_find_node_by_path("/cpus");
705 if (cpus == NULL) {
706 DBG("No /cpus node !\n");
707 return -ENODEV;
708 }
709
710 if (machine_is_compatible("PowerMac7,2") ||
711 machine_is_compatible("PowerMac7,3"))
712 rc = g5_pm72_cpufreq_init(cpus);
713 else if (machine_is_compatible("RackMac3,1"))
714 rc = g5_rm31_cpufreq_init(cpus);
715 else
716 rc = g5_neo2_cpufreq_init(cpus);
717
718 of_node_put(cpus);
719 return rc;
720}
721
320module_init(g5_cpufreq_init); 722module_init(g5_cpufreq_init);
321 723
322 724
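
Taken together, the cpufreq_64.c changes turn one hardwired backend into a probe-time dispatch: g5_switch_volt/g5_switch_freq/g5_query_freq are plain function pointers filled in by whichever init routine matched the machine. The pattern, reduced to a sketch (scom_switch/pfunc_switch are hypothetical stand-ins):

/* Sketch: pick a backend once at init, call through pointers after */
static int scom_switch(int mode)  { return 0; }	/* placeholder */
static int pfunc_switch(int mode) { return 0; }	/* placeholder */

static int (*do_switch)(int mode);

static void __init pick_backend(int has_pfunc_clock)
{
	do_switch = has_pfunc_clock ? pfunc_switch : scom_switch;
}
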
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index f6e22da2a5d..558dd069209 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -58,12 +58,11 @@ extern int powersave_lowspeed;
58extern int powersave_nap; 58extern int powersave_nap;
59extern struct device_node *k2_skiplist[2]; 59extern struct device_node *k2_skiplist[2];
60 60
61
62/* 61/*
63 * We use a single global lock to protect accesses. Each driver has 62 * We use a single global lock to protect accesses. Each driver has
64 * to take care of its own locking 63 * to take care of its own locking
65 */ 64 */
66static DEFINE_SPINLOCK(feature_lock); 65DEFINE_SPINLOCK(feature_lock);
67 66
68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); 67#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); 68#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
@@ -101,26 +100,17 @@ static const char *macio_names[] =
101 "Keylargo", 100 "Keylargo",
102 "Pangea", 101 "Pangea",
103 "Intrepid", 102 "Intrepid",
104 "K2" 103 "K2",
104 "Shasta",
105}; 105};
106 106
107 107
108struct device_node *uninorth_node;
109u32 __iomem *uninorth_base;
108 110
109/*
110 * Uninorth reg. access. Note that Uni-N regs are big endian
111 */
112
113#define UN_REG(r) (uninorth_base + ((r) >> 2))
114#define UN_IN(r) (in_be32(UN_REG(r)))
115#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
118
119static struct device_node *uninorth_node;
120static u32 __iomem *uninorth_base;
121static u32 uninorth_rev; 111static u32 uninorth_rev;
122static int uninorth_u3; 112static int uninorth_maj;
123static void __iomem *u3_ht; 113static void __iomem *u3_ht_base;
124 114
125/* 115/*
126 * For each motherboard family, we have a table of functions pointers 116 * For each motherboard family, we have a table of functions pointers
@@ -1399,8 +1389,15 @@ static long g5_fw_enable(struct device_node *node, long param, long value)
1399static long g5_mpic_enable(struct device_node *node, long param, long value) 1389static long g5_mpic_enable(struct device_node *node, long param, long value)
1400{ 1390{
1401 unsigned long flags; 1391 unsigned long flags;
1392 struct device_node *parent = of_get_parent(node);
1393 int is_u3;
1402 1394
1403 if (node->parent == NULL || strcmp(node->parent->name, "u3")) 1395 if (parent == NULL)
1396 return 0;
1397 is_u3 = strcmp(parent->name, "u3") == 0 ||
1398 strcmp(parent->name, "u4") == 0;
1399 of_node_put(parent);
1400 if (!is_u3)
1404 return 0; 1401 return 0;
1405 1402
1406 LOCK(flags); 1403 LOCK(flags);
@@ -1445,20 +1442,53 @@ static long g5_i2s_enable(struct device_node *node, long param, long value)
1445 /* Very crude implementation for now */ 1442 /* Very crude implementation for now */
1446 struct macio_chip *macio = &macio_chips[0]; 1443 struct macio_chip *macio = &macio_chips[0];
1447 unsigned long flags; 1444 unsigned long flags;
1448 1445 int cell;
1449 if (value == 0) 1446 u32 fcrs[3][3] = {
1450 return 0; /* don't disable yet */ 1447 { 0,
1448 K2_FCR1_I2S0_CELL_ENABLE |
1449 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE,
1450 KL3_I2S0_CLK18_ENABLE
1451 },
1452 { KL0_SCC_A_INTF_ENABLE,
1453 K2_FCR1_I2S1_CELL_ENABLE |
1454 K2_FCR1_I2S1_CLK_ENABLE_BIT | K2_FCR1_I2S1_ENABLE,
1455 KL3_I2S1_CLK18_ENABLE
1456 },
1457 { KL0_SCC_B_INTF_ENABLE,
1458 SH_FCR1_I2S2_CELL_ENABLE |
1459 SH_FCR1_I2S2_CLK_ENABLE_BIT | SH_FCR1_I2S2_ENABLE,
1460 SH_FCR3_I2S2_CLK18_ENABLE
1461 },
1462 };
1463
1464 if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
1465 return -ENODEV;
1466 if (strncmp(node->name, "i2s-", 4))
1467 return -ENODEV;
1468 cell = node->name[4] - 'a';
1469 switch(cell) {
1470 case 0:
1471 case 1:
1472 break;
1473 case 2:
1474 if (macio->type == macio_shasta)
1475 break;
1476 default:
1477 return -ENODEV;
1478 }
1451 1479
1452 LOCK(flags); 1480 LOCK(flags);
1453 MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE | 1481 if (value) {
1454 KL3_I2S0_CLK18_ENABLE); 1482 MACIO_BIC(KEYLARGO_FCR0, fcrs[cell][0]);
1455 udelay(10); 1483 MACIO_BIS(KEYLARGO_FCR1, fcrs[cell][1]);
1456 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE | 1484 MACIO_BIS(KEYLARGO_FCR3, fcrs[cell][2]);
1457 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE); 1485 } else {
1486 MACIO_BIC(KEYLARGO_FCR3, fcrs[cell][2]);
1487 MACIO_BIC(KEYLARGO_FCR1, fcrs[cell][1]);
1488 MACIO_BIS(KEYLARGO_FCR0, fcrs[cell][0]);
1489 }
1458 udelay(10); 1490 udelay(10);
1459 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
1460 UNLOCK(flags); 1491 UNLOCK(flags);
1461 udelay(10);
1462 1492
1463 return 0; 1493 return 0;
1464} 1494}
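
The rewritten g5_i2s_enable() is table-driven: one set of FCR0/FCR1/FCR3 bits per cell, applied with the MACIO_BIS/MACIO_BIC read-modify-write helpers. Those helpers boil down to the following (in_le32/out_le32 are the kernel's little-endian MMIO accessors, matching how the Mac I/O FCRs are accessed):

/* Sketch: read-modify-write bit set/clear on a feature control reg */
static inline void fcr_bis(u32 __iomem *reg, u32 bits)
{
	out_le32(reg, in_le32(reg) | bits);	/* set bits */
}

static inline void fcr_bic(u32 __iomem *reg, u32 bits)
{
	out_le32(reg, in_le32(reg) & ~bits);	/* clear bits */
}
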
@@ -1473,7 +1503,7 @@ static long g5_reset_cpu(struct device_node *node, long param, long value)
1473 struct device_node *np; 1503 struct device_node *np;
1474 1504
1475 macio = &macio_chips[0]; 1505 macio = &macio_chips[0];
1476 if (macio->type != macio_keylargo2) 1506 if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
1477 return -ENODEV; 1507 return -ENODEV;
1478 1508
1479 np = find_path_device("/cpus"); 1509 np = find_path_device("/cpus");
@@ -1512,14 +1542,17 @@ static long g5_reset_cpu(struct device_node *node, long param, long value)
1512 */ 1542 */
1513void g5_phy_disable_cpu1(void) 1543void g5_phy_disable_cpu1(void)
1514{ 1544{
1515 UN_OUT(U3_API_PHY_CONFIG_1, 0); 1545 if (uninorth_maj == 3)
1546 UN_OUT(U3_API_PHY_CONFIG_1, 0);
1516} 1547}
1517#endif /* CONFIG_POWER4 */ 1548#endif /* CONFIG_POWER4 */
1518 1549
1519#ifndef CONFIG_POWER4 1550#ifndef CONFIG_POWER4
1520 1551
1521static void 1552
1522keylargo_shutdown(struct macio_chip *macio, int sleep_mode) 1553#ifdef CONFIG_PM
1554
1555static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
1523{ 1556{
1524 u32 temp; 1557 u32 temp;
1525 1558
@@ -1572,8 +1605,7 @@ keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
1572 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); 1605 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1573} 1606}
1574 1607
1575static void 1608static void pangea_shutdown(struct macio_chip *macio, int sleep_mode)
1576pangea_shutdown(struct macio_chip *macio, int sleep_mode)
1577{ 1609{
1578 u32 temp; 1610 u32 temp;
1579 1611
@@ -1606,8 +1638,7 @@ pangea_shutdown(struct macio_chip *macio, int sleep_mode)
1606 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); 1638 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1607} 1639}
1608 1640
1609static void 1641static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
1610intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
1611{ 1642{
1612 u32 temp; 1643 u32 temp;
1613 1644
@@ -1635,124 +1666,6 @@ intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
1635} 1666}
1636 1667
1637 1668
1638void pmac_tweak_clock_spreading(int enable)
1639{
1640 struct macio_chip *macio = &macio_chips[0];
1641
1642 /* Hack for doing clock spreading on some machines PowerBooks and
1643 * iBooks. This implements the "platform-do-clockspreading" OF
1644 * property as decoded manually on various models. For safety, we also
1645 * check the product ID in the device-tree in cases we'll whack the i2c
1646 * chip to make reasonably sure we won't set wrong values in there
1647 *
1648 * Of course, ultimately, we have to implement a real parser for
1649 * the platform-do-* stuff...
1650 */
1651
1652 if (macio->type == macio_intrepid) {
1653 struct device_node *clock =
1654 of_find_node_by_path("/uni-n@f8000000/hw-clock");
1655 if (clock && get_property(clock, "platform-do-clockspreading",
1656 NULL)) {
1657 printk(KERN_INFO "%sabling clock spreading on Intrepid"
1658 " ASIC\n", enable ? "En" : "Dis");
1659 if (enable)
1660 UN_OUT(UNI_N_CLOCK_SPREADING, 2);
1661 else
1662 UN_OUT(UNI_N_CLOCK_SPREADING, 0);
1663 mdelay(40);
1664 }
1665 of_node_put(clock);
1666 }
1667
1668 while (machine_is_compatible("PowerBook5,2") ||
1669 machine_is_compatible("PowerBook5,3") ||
1670 machine_is_compatible("PowerBook6,2") ||
1671 machine_is_compatible("PowerBook6,3")) {
1672 struct device_node *ui2c = of_find_node_by_type(NULL, "i2c");
1673 struct device_node *dt = of_find_node_by_name(NULL, "device-tree");
1674 u8 buffer[9];
1675 u32 *productID;
1676 int i, rc, changed = 0;
1677
1678 if (dt == NULL)
1679 break;
1680 productID = (u32 *)get_property(dt, "pid#", NULL);
1681 if (productID == NULL)
1682 break;
1683 while(ui2c) {
1684 struct device_node *p = of_get_parent(ui2c);
1685 if (p && !strcmp(p->name, "uni-n"))
1686 break;
1687 ui2c = of_find_node_by_type(ui2c, "i2c");
1688 }
1689 if (ui2c == NULL)
1690 break;
1691 DBG("Trying to bump clock speed for PID: %08x...\n", *productID);
1692 rc = pmac_low_i2c_open(ui2c, 1);
1693 if (rc != 0)
1694 break;
1695 pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
1696 rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
1697 DBG("read result: %d,", rc);
1698 if (rc != 0) {
1699 pmac_low_i2c_close(ui2c);
1700 break;
1701 }
1702 for (i=0; i<9; i++)
1703 DBG(" %02x", buffer[i]);
1704 DBG("\n");
1705
1706 switch(*productID) {
1707 case 0x1182: /* AlBook 12" rev 2 */
1708 case 0x1183: /* iBook G4 12" */
1709 buffer[0] = (buffer[0] & 0x8f) | 0x70;
1710 buffer[2] = (buffer[2] & 0x7f) | 0x00;
1711 buffer[5] = (buffer[5] & 0x80) | 0x31;
1712 buffer[6] = (buffer[6] & 0x40) | 0xb0;
1713 buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba);
1714 buffer[8] = (buffer[8] & 0x00) | 0x30;
1715 changed = 1;
1716 break;
1717 case 0x3142: /* AlBook 15" (ATI M10) */
1718 case 0x3143: /* AlBook 17" (ATI M10) */
1719 buffer[0] = (buffer[0] & 0xaf) | 0x50;
1720 buffer[2] = (buffer[2] & 0x7f) | 0x00;
1721 buffer[5] = (buffer[5] & 0x80) | 0x31;
1722 buffer[6] = (buffer[6] & 0x40) | 0xb0;
1723 buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0);
1724 buffer[8] = (buffer[8] & 0x00) | 0x30;
1725 changed = 1;
1726 break;
1727 default:
1728 DBG("i2c-hwclock: Machine model not handled\n");
1729 break;
1730 }
1731 if (!changed) {
1732 pmac_low_i2c_close(ui2c);
1733 break;
1734 }
1735 printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
1736 enable ? "En" : "Dis");
1737
1738 pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
1739 rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
1740 DBG("write result: %d,", rc);
1741 pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
1742 rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
1743 DBG("read result: %d,", rc);
1744 if (rc != 0) {
1745 pmac_low_i2c_close(ui2c);
1746 break;
1747 }
1748 for (i=0; i<9; i++)
1749 DBG(" %02x", buffer[i]);
1750 pmac_low_i2c_close(ui2c);
1751 break;
1752 }
1753}
1754
1755
1756static int 1669static int
1757core99_sleep(void) 1670core99_sleep(void)
1758{ 1671{
@@ -1909,6 +1822,8 @@ core99_wake_up(void)
1909 return 0; 1822 return 0;
1910} 1823}
1911 1824
1825#endif /* CONFIG_PM */
1826
1912static long 1827static long
1913core99_sleep_state(struct device_node *node, long param, long value) 1828core99_sleep_state(struct device_node *node, long param, long value)
1914{ 1829{
@@ -1930,10 +1845,13 @@ core99_sleep_state(struct device_node *node, long param, long value)
1930 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) 1845 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
1931 return -EPERM; 1846 return -EPERM;
1932 1847
1848#ifdef CONFIG_PM
1933 if (value == 1) 1849 if (value == 1)
1934 return core99_sleep(); 1850 return core99_sleep();
1935 else if (value == 0) 1851 else if (value == 0)
1936 return core99_wake_up(); 1852 return core99_wake_up();
1853
1854#endif /* CONFIG_PM */
1937 return 0; 1855 return 0;
1938} 1856}
1939 1857
@@ -2057,7 +1975,9 @@ static struct feature_table_entry core99_features[] = {
2057 { PMAC_FTR_USB_ENABLE, core99_usb_enable }, 1975 { PMAC_FTR_USB_ENABLE, core99_usb_enable },
2058 { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, 1976 { PMAC_FTR_1394_ENABLE, core99_firewire_enable },
2059 { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, 1977 { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
1978#ifdef CONFIG_PM
2060 { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, 1979 { PMAC_FTR_SLEEP_STATE, core99_sleep_state },
1980#endif
2061#ifdef CONFIG_SMP 1981#ifdef CONFIG_SMP
2062 { PMAC_FTR_RESET_CPU, core99_reset_cpu }, 1982 { PMAC_FTR_RESET_CPU, core99_reset_cpu },
2063#endif /* CONFIG_SMP */ 1983#endif /* CONFIG_SMP */
@@ -2427,6 +2347,14 @@ static struct pmac_mb_def pmac_mb_defs[] = {
2427 PMAC_TYPE_POWERMAC_G5_U3L, g5_features, 2347 PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
2428 0, 2348 0,
2429 }, 2349 },
2350 { "PowerMac11,2", "PowerMac G5 Dual Core",
2351 PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
2352 0,
2353 },
2354 { "PowerMac12,1", "iMac G5 (iSight)",
2355 PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
2356 0,
2357 },
2430 { "RackMac3,1", "XServe G5", 2358 { "RackMac3,1", "XServe G5",
2431 PMAC_TYPE_XSERVE_G5, g5_features, 2359 PMAC_TYPE_XSERVE_G5, g5_features,
2432 0, 2360 0,
@@ -2539,6 +2467,11 @@ static int __init probe_motherboard(void)
2539 pmac_mb.model_name = "Unknown K2-based"; 2467 pmac_mb.model_name = "Unknown K2-based";
2540 pmac_mb.features = g5_features; 2468 pmac_mb.features = g5_features;
2541 break; 2469 break;
2470 case macio_shasta:
2471 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_SHASTA;
2472 pmac_mb.model_name = "Unknown Shasta-based";
2473 pmac_mb.features = g5_features;
2474 break;
2542#endif /* CONFIG_POWER4 */ 2475#endif /* CONFIG_POWER4 */
2543 default: 2476 default:
2544 return -ENODEV; 2477 return -ENODEV;
@@ -2607,6 +2540,8 @@ found:
2607 */ 2540 */
2608static void __init probe_uninorth(void) 2541static void __init probe_uninorth(void)
2609{ 2542{
2543 u32 *addrp;
2544 phys_addr_t address;
2610 unsigned long actrl; 2545 unsigned long actrl;
2611 2546
2612 /* Locate core99 Uni-N */ 2547 /* Locate core99 Uni-N */
@@ -2614,22 +2549,31 @@ static void __init probe_uninorth(void)
2614 /* Locate G5 u3 */ 2549 /* Locate G5 u3 */
2615 if (uninorth_node == NULL) { 2550 if (uninorth_node == NULL) {
2616 uninorth_node = of_find_node_by_name(NULL, "u3"); 2551 uninorth_node = of_find_node_by_name(NULL, "u3");
2617 uninorth_u3 = 1; 2552 uninorth_maj = 3;
2618 } 2553 }
2619 if (uninorth_node && uninorth_node->n_addrs > 0) { 2554 /* Locate G5 u4 */
2620 unsigned long address = uninorth_node->addrs[0].address; 2555 if (uninorth_node == NULL) {
2621 uninorth_base = ioremap(address, 0x40000); 2556 uninorth_node = of_find_node_by_name(NULL, "u4");
2622 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION)); 2557 uninorth_maj = 4;
2623 if (uninorth_u3) 2558 }
2624 u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000); 2559 if (uninorth_node == NULL)
2625 } else
2626 uninorth_node = NULL;
2627
2628 if (!uninorth_node)
2629 return; 2560 return;
2630 2561
2631 printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n", 2562 addrp = (u32 *)get_property(uninorth_node, "reg", NULL);
2632 uninorth_u3 ? "U3" : "UniNorth", uninorth_rev); 2563 if (addrp == NULL)
2564 return;
2565 address = of_translate_address(uninorth_node, addrp);
2566 if (address == 0)
2567 return;
2568 uninorth_base = ioremap(address, 0x40000);
2569 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
2570 if (uninorth_maj == 3 || uninorth_maj == 4)
2571 u3_ht_base = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
2572
2573 printk(KERN_INFO "Found %s memory controller & host bridge"
2574 " @ 0x%08x revision: 0x%02x\n", uninorth_maj == 3 ? "U3" :
2575 uninorth_maj == 4 ? "U4" : "UniNorth",
2576 (unsigned int)address, uninorth_rev);
2633 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base); 2577 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
2634 2578
2635	/* Set the arbiter QAck delay according to what Apple does 2579	/* Set the arbiter QAck delay according to what Apple does
@@ -2637,7 +2581,8 @@ static void __init probe_uninorth(void)
2637 if (uninorth_rev < 0x11) { 2581 if (uninorth_rev < 0x11) {
2638 actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK; 2582 actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
2639 actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 : 2583 actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
2640 UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT; 2584 UNI_N_ARB_CTRL_QACK_DELAY) <<
2585 UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
2641 UN_OUT(UNI_N_ARB_CTRL, actrl); 2586 UN_OUT(UNI_N_ARB_CTRL, actrl);
2642 } 2587 }
2643 2588
@@ -2645,7 +2590,8 @@ static void __init probe_uninorth(void)
2645	 * revs 1.5 to 2.0 and Pangea. Seems to toggle the UniN Maxbus/PCI 2590	 * revs 1.5 to 2.0 and Pangea. Seems to toggle the UniN Maxbus/PCI
2646 * memory timeout 2591 * memory timeout
2647 */ 2592 */
2648 if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0) 2593 if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) ||
2594 uninorth_rev == 0xc0)
2649 UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff); 2595 UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
2650} 2596}
2651 2597
@@ -2653,18 +2599,17 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
2653{ 2599{
2654 struct device_node* node; 2600 struct device_node* node;
2655 int i; 2601 int i;
2656 volatile u32 __iomem * base; 2602 volatile u32 __iomem *base;
2657 u32* revp; 2603 u32 *addrp, *revp;
2604 phys_addr_t addr;
2605 u64 size;
2658 2606
2659 node = find_devices(name); 2607 for (node = NULL; (node = of_find_node_by_name(node, name)) != NULL;) {
2660 if (!node || !node->n_addrs) 2608 if (!compat)
2661 return; 2609 break;
2662 if (compat) 2610 if (device_is_compatible(node, compat))
2663 do { 2611 break;
2664 if (device_is_compatible(node, compat)) 2612 }
2665 break;
2666 node = node->next;
2667 } while (node);
2668 if (!node) 2613 if (!node)
2669 return; 2614 return;
2670 for(i=0; i<MAX_MACIO_CHIPS; i++) { 2615 for(i=0; i<MAX_MACIO_CHIPS; i++) {
@@ -2673,22 +2618,38 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
2673 if (macio_chips[i].of_node == node) 2618 if (macio_chips[i].of_node == node)
2674 return; 2619 return;
2675 } 2620 }
2621
2676 if (i >= MAX_MACIO_CHIPS) { 2622 if (i >= MAX_MACIO_CHIPS) {
2677 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); 2623 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
2678 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name); 2624 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
2679 return; 2625 return;
2680 } 2626 }
2681 base = ioremap(node->addrs[0].address, node->addrs[0].size); 2627 addrp = of_get_pci_address(node, 0, &size, NULL);
2628 if (addrp == NULL) {
2629 printk(KERN_ERR "pmac_feature: %s: can't find base !\n",
2630 node->full_name);
2631 return;
2632 }
2633 addr = of_translate_address(node, addrp);
2634 if (addr == 0) {
2635 printk(KERN_ERR "pmac_feature: %s, can't translate base !\n",
2636 node->full_name);
2637 return;
2638 }
2639 base = ioremap(addr, (unsigned long)size);
2682 if (!base) { 2640 if (!base) {
2683 printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n"); 2641 printk(KERN_ERR "pmac_feature: %s, can't map mac-io chip !\n",
2642 node->full_name);
2684 return; 2643 return;
2685 } 2644 }
2686 if (type == macio_keylargo) { 2645 if (type == macio_keylargo || type == macio_keylargo2) {
2687 u32 *did = (u32 *)get_property(node, "device-id", NULL); 2646 u32 *did = (u32 *)get_property(node, "device-id", NULL);
2688 if (*did == 0x00000025) 2647 if (*did == 0x00000025)
2689 type = macio_pangea; 2648 type = macio_pangea;
2690 if (*did == 0x0000003e) 2649 if (*did == 0x0000003e)
2691 type = macio_intrepid; 2650 type = macio_intrepid;
2651 if (*did == 0x0000004f)
2652 type = macio_shasta;
2692 } 2653 }
2693 macio_chips[i].of_node = node; 2654 macio_chips[i].of_node = node;
2694 macio_chips[i].type = type; 2655 macio_chips[i].type = type;
@@ -2787,7 +2748,8 @@ set_initial_features(void)
2787 } 2748 }
2788 2749
2789#ifdef CONFIG_POWER4 2750#ifdef CONFIG_POWER4
2790 if (macio_chips[0].type == macio_keylargo2) { 2751 if (macio_chips[0].type == macio_keylargo2 ||
2752 macio_chips[0].type == macio_shasta) {
2791#ifndef CONFIG_SMP 2753#ifndef CONFIG_SMP
2792 /* On SMP machines running UP, we have the second CPU eating 2754 /* On SMP machines running UP, we have the second CPU eating
2793 * bus cycles. We need to take it off the bus. This is done 2755 * bus cycles. We need to take it off the bus. This is done
@@ -2896,12 +2858,6 @@ set_initial_features(void)
2896 MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N); 2858 MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
2897 } 2859 }
2898 2860
2899 /* Some machine models need the clock chip to be properly setup for
2900 * clock spreading now. This should be a platform function but we
2901 * don't do these at the moment
2902 */
2903 pmac_tweak_clock_spreading(1);
2904
2905#endif /* CONFIG_POWER4 */ 2861#endif /* CONFIG_POWER4 */
2906 2862
2907 /* On all machines, switch modem & serial ports off */ 2863 /* On all machines, switch modem & serial ports off */
@@ -2929,9 +2885,6 @@ pmac_feature_init(void)
2929 return; 2885 return;
2930 } 2886 }
2931 2887
2932 /* Setup low-level i2c stuffs */
2933 pmac_init_low_i2c();
2934
2935 /* Probe machine type */ 2888 /* Probe machine type */
2936 if (probe_motherboard()) 2889 if (probe_motherboard())
2937 printk(KERN_WARNING "Unknown PowerMac !\n"); 2890 printk(KERN_WARNING "Unknown PowerMac !\n");
@@ -2942,26 +2895,6 @@ pmac_feature_init(void)
2942 set_initial_features(); 2895 set_initial_features();
2943} 2896}
2944 2897
2945int __init pmac_feature_late_init(void)
2946{
2947#if 0
2948 struct device_node *np;
2949
2950 /* Request some resources late */
2951 if (uninorth_node)
2952 request_OF_resource(uninorth_node, 0, NULL);
2953 np = find_devices("hammerhead");
2954 if (np)
2955 request_OF_resource(np, 0, NULL);
2956 np = find_devices("interrupt-controller");
2957 if (np)
2958 request_OF_resource(np, 0, NULL);
2959#endif
2960 return 0;
2961}
2962
2963device_initcall(pmac_feature_late_init);
2964
2965#if 0 2898#if 0
2966static void dump_HT_speeds(char *name, u32 cfg, u32 frq) 2899static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
2967{ 2900{
@@ -2984,9 +2917,9 @@ void __init pmac_check_ht_link(void)
2984 u8 px_bus, px_devfn; 2917 u8 px_bus, px_devfn;
2985 struct pci_controller *px_hose; 2918 struct pci_controller *px_hose;
2986 2919
2987 (void)in_be32(u3_ht + U3_HT_LINK_COMMAND); 2920 (void)in_be32(u3_ht_base + U3_HT_LINK_COMMAND);
2988 ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG); 2921 ucfg = cfg = in_be32(u3_ht_base + U3_HT_LINK_CONFIG);
2989 ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ); 2922 ufreq = freq = in_be32(u3_ht_base + U3_HT_LINK_FREQ);
2990 dump_HT_speeds("U3 HyperTransport", cfg, freq); 2923 dump_HT_speeds("U3 HyperTransport", cfg, freq);
2991 2924
2992 pcix_node = of_find_compatible_node(NULL, "pci", "pci-x"); 2925 pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f3f39e8e337..535c802b369 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -1,22 +1,34 @@
1/* 1/*
2 * arch/ppc/platforms/pmac_low_i2c.c 2 * arch/powerpc/platforms/powermac/low_i2c.c
3 * 3 *
4 * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org) 4 * Copyright (C) 2003-2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * This file contains some low-level i2c access routines that 11 * The linux i2c layer isn't completely suitable for our needs for various
12 * need to be used by various bits of the PowerMac platform code 12 * reasons ranging from too late initialisation to semantics not perfectly
13 * at times where the real asynchronous & interrupt driven driver 13 * matching some requirements of the apple platform functions etc...
14 * cannot be used. The API borrows some semantics from the darwin 14 *
15 * driver in order to ease the implementation of the platform 15 * This file thus provides a simple low level unified i2c interface for
16 * properties parser 16 * powermac that covers the various types of i2c busses used in Apple machines.
17 * For now, keywest, PMU and SMU, though we could add Cuda, or other bit
18 * banging busses found on older chipsets in earlier machines if we ever need
19 * one of them.
20 *
21 * The drivers in this file are synchronous/blocking. In addition, the
22 * keywest one is fairly slow due to the use of msleep instead of interrupts
23 * as the interrupt is currently used by i2c-keywest. In the long run, we
24 * might want to get rid of those high-level interfaces to linux i2c layer
25 * either completely (converting all drivers) or replacing them all with a
26 * single stub driver on top of this one. Once done, the interrupt will be
27 * available for our use.
17 */ 28 */
18 29
19#undef DEBUG 30#undef DEBUG
31#undef DEBUG_LOW
20 32
21#include <linux/config.h> 33#include <linux/config.h>
22#include <linux/types.h> 34#include <linux/types.h>
@@ -25,66 +37,91 @@
25#include <linux/module.h> 37#include <linux/module.h>
26#include <linux/adb.h> 38#include <linux/adb.h>
27#include <linux/pmu.h> 39#include <linux/pmu.h>
40#include <linux/delay.h>
41#include <linux/completion.h>
42#include <linux/platform_device.h>
43#include <linux/interrupt.h>
44#include <linux/completion.h>
45#include <linux/timer.h>
28#include <asm/keylargo.h> 46#include <asm/keylargo.h>
29#include <asm/uninorth.h> 47#include <asm/uninorth.h>
30#include <asm/io.h> 48#include <asm/io.h>
31#include <asm/prom.h> 49#include <asm/prom.h>
32#include <asm/machdep.h> 50#include <asm/machdep.h>
51#include <asm/smu.h>
52#include <asm/pmac_pfunc.h>
33#include <asm/pmac_low_i2c.h> 53#include <asm/pmac_low_i2c.h>
34 54
35#define MAX_LOW_I2C_HOST 4
36
37#ifdef DEBUG 55#ifdef DEBUG
38#define DBG(x...) do {\ 56#define DBG(x...) do {\
39 printk(KERN_DEBUG "KW:" x); \ 57 printk(KERN_DEBUG "low_i2c:" x); \
40 } while(0) 58 } while(0)
41#else 59#else
42#define DBG(x...) 60#define DBG(x...)
43#endif 61#endif
44 62
45struct low_i2c_host; 63#ifdef DEBUG_LOW
46 64#define DBG_LOW(x...) do {\
47typedef int (*low_i2c_func_t)(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len); 65 printk(KERN_DEBUG "low_i2c:" x); \
66 } while(0)
67#else
68#define DBG_LOW(x...)
69#endif
48 70
49struct low_i2c_host
50{
51 struct device_node *np; /* OF device node */
52 struct semaphore mutex; /* Access mutex for use by i2c-keywest */
53 low_i2c_func_t func; /* Access function */
54 unsigned int is_open : 1; /* Poor man's access control */
55 int mode; /* Current mode */
56 int channel; /* Current channel */
57 int num_channels; /* Number of channels */
58 void __iomem *base; /* For keywest-i2c, base address */
59 int bsteps; /* And register stepping */
60 int speed; /* And speed */
61};
62 71
63static struct low_i2c_host low_i2c_hosts[MAX_LOW_I2C_HOST]; 72static int pmac_i2c_force_poll = 1;
64 73
65/* No locking is necessary on allocation, we are running way before 74/*
66 * anything can race with us 75 * A bus structure. Each bus in the system has such a structure associated.
67 */ 76 */
68static struct low_i2c_host *find_low_i2c_host(struct device_node *np) 77struct pmac_i2c_bus
69{ 78{
70 int i; 79 struct list_head link;
80 struct device_node *controller;
81 struct device_node *busnode;
82 int type;
83 int flags;
84 struct i2c_adapter *adapter;
85 void *hostdata;
86 int channel; /* some hosts have multiple */
87 int mode; /* current mode */
88 struct semaphore sem;
89 int opened;
90 int polled; /* open mode */
91 struct platform_device *platform_dev;
92
93 /* ops */
94 int (*open)(struct pmac_i2c_bus *bus);
95 void (*close)(struct pmac_i2c_bus *bus);
96 int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
97 u32 subaddr, u8 *data, int len);
98};
71 99
72 for (i = 0; i < MAX_LOW_I2C_HOST; i++) 100static LIST_HEAD(pmac_i2c_busses);
73 if (low_i2c_hosts[i].np == np)
74 return &low_i2c_hosts[i];
75 return NULL;
76}
77 101
78/* 102/*
79 * 103 * Keywest implementation
80 * i2c-keywest implementation (UniNorth, U2, U3, Keylargo's)
81 *
82 */ 104 */
83 105
84/* 106struct pmac_i2c_host_kw
85 * Keywest i2c definitions borrowed from drivers/i2c/i2c-keywest.h, 107{
86 * should be moved somewhere in include/asm-ppc/ 108 struct semaphore mutex; /* Access mutex for use by
87 */ 109 * i2c-keywest */
110 void __iomem *base; /* register base address */
111 int bsteps; /* register stepping */
112 int speed; /* speed */
113 int irq;
114 u8 *data;
115 unsigned len;
116 int state;
117 int rw;
118 int polled;
119 int result;
120 struct completion complete;
121 spinlock_t lock;
122 struct timer_list timeout_timer;
123};
124
88/* Register indices */ 125/* Register indices */
89typedef enum { 126typedef enum {
90 reg_mode = 0, 127 reg_mode = 0,
@@ -97,6 +134,8 @@ typedef enum {
97 reg_data 134 reg_data
98} reg_t; 135} reg_t;
99 136
137/* The Tumbler audio equalizer can be really slow sometimes */
138#define KW_POLL_TIMEOUT (2*HZ)
100 139
101/* Mode register */ 140/* Mode register */
102#define KW_I2C_MODE_100KHZ 0x00 141#define KW_I2C_MODE_100KHZ 0x00
@@ -140,8 +179,9 @@ enum {
140}; 179};
141 180
142#define WRONG_STATE(name) do {\ 181#define WRONG_STATE(name) do {\
143 printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s (isr: %02x)\n", \ 182 printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \
144 name, __kw_state_names[state], isr); \ 183 "(isr: %02x)\n", \
184 name, __kw_state_names[host->state], isr); \
145 } while(0) 185 } while(0)
146 186
147static const char *__kw_state_names[] = { 187static const char *__kw_state_names[] = {
@@ -153,120 +193,137 @@ static const char *__kw_state_names[] = {
153 "state_dead" 193 "state_dead"
154}; 194};
155 195
156static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg) 196static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg)
157{ 197{
158 return readb(host->base + (((unsigned int)reg) << host->bsteps)); 198 return readb(host->base + (((unsigned int)reg) << host->bsteps));
159} 199}
160 200
161static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val) 201static inline void __kw_write_reg(struct pmac_i2c_host_kw *host,
202 reg_t reg, u8 val)
162{ 203{
163 writeb(val, host->base + (((unsigned)reg) << host->bsteps)); 204 writeb(val, host->base + (((unsigned)reg) << host->bsteps));
164 (void)__kw_read_reg(host, reg_subaddr); 205 (void)__kw_read_reg(host, reg_subaddr);
165} 206}
166 207
167#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val) 208#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
168#define kw_read_reg(reg) __kw_read_reg(host, reg) 209#define kw_read_reg(reg) __kw_read_reg(host, reg)
169
170 210
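For illustration (standalone user-space C, not part of the patch): the bsteps shift used by these accessors turns the AAPL,address-step property into a register stride, so with the common step of 0x10, register index 3 lands at base + 0x30.

	#include <stdio.h>

	int main(void)
	{
		unsigned int steps = 0x10;	/* AAPL,address-step */
		int bsteps = 0;

		/* same loop as kw_i2c_host_init() below */
		while ((steps & 0x01) == 0) {
			bsteps++;
			steps >>= 1;
		}
		printf("bsteps=%d reg3@+0x%x\n", bsteps, 3u << bsteps);
		return 0;	/* prints bsteps=4 reg3@+0x30 */
	}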
171/* Don't schedule, the g5 fan controller is too 211static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host)
172 * timing sensitive
173 */
174static u8 kw_wait_interrupt(struct low_i2c_host* host)
175{ 212{
176 int i, j; 213 int i, j;
177 u8 isr; 214 u8 isr;
178 215
179 for (i = 0; i < 100000; i++) { 216 for (i = 0; i < 1000; i++) {
180 isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK; 217 isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
181 if (isr != 0) 218 if (isr != 0)
182 return isr; 219 return isr;
183 220
184 /* This code is used with the timebase frozen, we cannot rely 221 /* This code is used with the timebase frozen, we cannot rely
185 * on udelay ! For now, just use a bogus loop 222 * on udelay nor schedule when in polled mode !
223 * For now, just use a bogus loop....
186 */ 224 */
187 for (j = 1; j < 10000; j++) 225 if (host->polled) {
188 mb(); 226 for (j = 1; j < 100000; j++)
227 mb();
228 } else
229 msleep(1);
189 } 230 }
190 return isr; 231 return isr;
191} 232}
192 233
193static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int *rc, u8 **data, int *len, u8 isr) 234static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr)
194{ 235{
195 u8 ack; 236 u8 ack;
196 237
197 DBG("kw_handle_interrupt(%s, isr: %x)\n", __kw_state_names[state], isr); 238 DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n",
239 __kw_state_names[host->state], isr);
240
241 if (host->state == state_idle) {
242 printk(KERN_WARNING "low_i2c: Keywest got an out of state"
243 " interrupt, ignoring\n");
244 kw_write_reg(reg_isr, isr);
245 return;
246 }
198 247
199 if (isr == 0) { 248 if (isr == 0) {
200 if (state != state_stop) { 249 if (host->state != state_stop) {
201 DBG("KW: Timeout !\n"); 250 DBG_LOW("KW: Timeout !\n");
202 *rc = -EIO; 251 host->result = -EIO;
203 goto stop; 252 goto stop;
204 } 253 }
205 if (state == state_stop) { 254 if (host->state == state_stop) {
206 ack = kw_read_reg(reg_status); 255 ack = kw_read_reg(reg_status);
207 if (!(ack & KW_I2C_STAT_BUSY)) { 256 if (ack & KW_I2C_STAT_BUSY)
208 state = state_idle; 257 kw_write_reg(reg_status, 0);
209 kw_write_reg(reg_ier, 0x00); 258 host->state = state_idle;
210 } 259 kw_write_reg(reg_ier, 0x00);
260 if (!host->polled)
261 complete(&host->complete);
211 } 262 }
212 return state; 263 return;
213 } 264 }
214 265
215 if (isr & KW_I2C_IRQ_ADDR) { 266 if (isr & KW_I2C_IRQ_ADDR) {
216 ack = kw_read_reg(reg_status); 267 ack = kw_read_reg(reg_status);
217 if (state != state_addr) { 268 if (host->state != state_addr) {
218 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR); 269 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
219 WRONG_STATE("KW_I2C_IRQ_ADDR"); 270 WRONG_STATE("KW_I2C_IRQ_ADDR");
220 *rc = -EIO; 271 host->result = -EIO;
221 goto stop; 272 goto stop;
222 } 273 }
223 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) { 274 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
224 *rc = -ENODEV; 275 host->result = -ENODEV;
225 DBG("KW: NAK on address\n"); 276 DBG_LOW("KW: NAK on address\n");
226 return state_stop; 277 host->state = state_stop;
278 return;
227 } else { 279 } else {
228 if (rw) { 280 if (host->len == 0) {
229 state = state_read; 281 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
230 if (*len > 1) 282 goto stop;
231 kw_write_reg(reg_control, KW_I2C_CTL_AAK); 283 }
284 if (host->rw) {
285 host->state = state_read;
286 if (host->len > 1)
287 kw_write_reg(reg_control,
288 KW_I2C_CTL_AAK);
232 } else { 289 } else {
233 state = state_write; 290 host->state = state_write;
234 kw_write_reg(reg_data, **data); 291 kw_write_reg(reg_data, *(host->data++));
235 (*data)++; (*len)--; 292 host->len--;
236 } 293 }
237 } 294 }
238 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR); 295 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
239 } 296 }
240 297
241 if (isr & KW_I2C_IRQ_DATA) { 298 if (isr & KW_I2C_IRQ_DATA) {
242 if (state == state_read) { 299 if (host->state == state_read) {
243 **data = kw_read_reg(reg_data); 300 *(host->data++) = kw_read_reg(reg_data);
244 (*data)++; (*len)--; 301 host->len--;
245 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); 302 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
246 if ((*len) == 0) 303 if (host->len == 0)
247 state = state_stop; 304 host->state = state_stop;
248 else if ((*len) == 1) 305 else if (host->len == 1)
249 kw_write_reg(reg_control, 0); 306 kw_write_reg(reg_control, 0);
250 } else if (state == state_write) { 307 } else if (host->state == state_write) {
251 ack = kw_read_reg(reg_status); 308 ack = kw_read_reg(reg_status);
252 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) { 309 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
253 DBG("KW: nack on data write\n"); 310 DBG_LOW("KW: nack on data write\n");
254 *rc = -EIO; 311 host->result = -EIO;
255 goto stop; 312 goto stop;
256 } else if (*len) { 313 } else if (host->len) {
257 kw_write_reg(reg_data, **data); 314 kw_write_reg(reg_data, *(host->data++));
258 (*data)++; (*len)--; 315 host->len--;
259 } else { 316 } else {
260 kw_write_reg(reg_control, KW_I2C_CTL_STOP); 317 kw_write_reg(reg_control, KW_I2C_CTL_STOP);
261 state = state_stop; 318 host->state = state_stop;
262 *rc = 0; 319 host->result = 0;
263 } 320 }
264 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); 321 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
265 } else { 322 } else {
266 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); 323 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
267 WRONG_STATE("KW_I2C_IRQ_DATA"); 324 WRONG_STATE("KW_I2C_IRQ_DATA");
268 if (state != state_stop) { 325 if (host->state != state_stop) {
269 *rc = -EIO; 326 host->result = -EIO;
270 goto stop; 327 goto stop;
271 } 328 }
272 } 329 }
@@ -274,98 +331,194 @@ static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int
274 331
275 if (isr & KW_I2C_IRQ_STOP) { 332 if (isr & KW_I2C_IRQ_STOP) {
276 kw_write_reg(reg_isr, KW_I2C_IRQ_STOP); 333 kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
277 if (state != state_stop) { 334 if (host->state != state_stop) {
278 WRONG_STATE("KW_I2C_IRQ_STOP"); 335 WRONG_STATE("KW_I2C_IRQ_STOP");
279 *rc = -EIO; 336 host->result = -EIO;
280 } 337 }
281 return state_idle; 338 host->state = state_idle;
339 if (!host->polled)
340 complete(&host->complete);
282 } 341 }
283 342
284 if (isr & KW_I2C_IRQ_START) 343 if (isr & KW_I2C_IRQ_START)
285 kw_write_reg(reg_isr, KW_I2C_IRQ_START); 344 kw_write_reg(reg_isr, KW_I2C_IRQ_START);
286 345
287 return state; 346 return;
288
289 stop: 347 stop:
290 kw_write_reg(reg_control, KW_I2C_CTL_STOP); 348 kw_write_reg(reg_control, KW_I2C_CTL_STOP);
291 return state_stop; 349 host->state = state_stop;
350 return;
292} 351}
293 352
294static int keywest_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 subaddr, u8 *data, int len) 353/* Interrupt handler */
354static irqreturn_t kw_i2c_irq(int irq, void *dev_id, struct pt_regs *regs)
295{ 355{
356 struct pmac_i2c_host_kw *host = dev_id;
357 unsigned long flags;
358
359 spin_lock_irqsave(&host->lock, flags);
360 del_timer(&host->timeout_timer);
361 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
362 if (host->state != state_idle) {
363 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
364 add_timer(&host->timeout_timer);
365 }
366 spin_unlock_irqrestore(&host->lock, flags);
367 return IRQ_HANDLED;
368}
369
370static void kw_i2c_timeout(unsigned long data)
371{
372 struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
373 unsigned long flags;
374
375 spin_lock_irqsave(&host->lock, flags);
376 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
377 if (host->state != state_idle) {
378 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
379 add_timer(&host->timeout_timer);
380 }
381 spin_unlock_irqrestore(&host->lock, flags);
382}
383
384static int kw_i2c_open(struct pmac_i2c_bus *bus)
385{
386 struct pmac_i2c_host_kw *host = bus->hostdata;
387 down(&host->mutex);
388 return 0;
389}
390
391static void kw_i2c_close(struct pmac_i2c_bus *bus)
392{
393 struct pmac_i2c_host_kw *host = bus->hostdata;
394 up(&host->mutex);
395}
396
397static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
398 u32 subaddr, u8 *data, int len)
399{
400 struct pmac_i2c_host_kw *host = bus->hostdata;
296 u8 mode_reg = host->speed; 401 u8 mode_reg = host->speed;
297 int state = state_addr; 402 int use_irq = host->irq != NO_IRQ && !bus->polled;
298 int rc = 0;
299 403
300 /* Setup mode & subaddress if any */ 404 /* Setup mode & subaddress if any */
301 switch(host->mode) { 405 switch(bus->mode) {
302 case pmac_low_i2c_mode_dumb: 406 case pmac_i2c_mode_dumb:
303 printk(KERN_ERR "low_i2c: Dumb mode not supported !\n");
304 return -EINVAL; 407 return -EINVAL;
305 case pmac_low_i2c_mode_std: 408 case pmac_i2c_mode_std:
306 mode_reg |= KW_I2C_MODE_STANDARD; 409 mode_reg |= KW_I2C_MODE_STANDARD;
410 if (subsize != 0)
411 return -EINVAL;
307 break; 412 break;
308 case pmac_low_i2c_mode_stdsub: 413 case pmac_i2c_mode_stdsub:
309 mode_reg |= KW_I2C_MODE_STANDARDSUB; 414 mode_reg |= KW_I2C_MODE_STANDARDSUB;
415 if (subsize != 1)
416 return -EINVAL;
310 break; 417 break;
311 case pmac_low_i2c_mode_combined: 418 case pmac_i2c_mode_combined:
312 mode_reg |= KW_I2C_MODE_COMBINED; 419 mode_reg |= KW_I2C_MODE_COMBINED;
420 if (subsize != 1)
421 return -EINVAL;
313 break; 422 break;
314 } 423 }
315 424
316 /* Setup channel & clear pending irqs */ 425 /* Setup channel & clear pending irqs */
317 kw_write_reg(reg_isr, kw_read_reg(reg_isr)); 426 kw_write_reg(reg_isr, kw_read_reg(reg_isr));
318 kw_write_reg(reg_mode, mode_reg | (host->channel << 4)); 427 kw_write_reg(reg_mode, mode_reg | (bus->channel << 4));
319 kw_write_reg(reg_status, 0); 428 kw_write_reg(reg_status, 0);
320 429
321 /* Set up address and r/w bit */ 430 /* Set up address and r/w bit, strip possible stale bus number from
322 kw_write_reg(reg_addr, addr); 431 * address top bits
432 */
433 kw_write_reg(reg_addr, addrdir & 0xff);
323 434
324 /* Set up the sub address */ 435 /* Set up the sub address */
325 if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB 436 if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
326 || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED) 437 || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
327 kw_write_reg(reg_subaddr, subaddr); 438 kw_write_reg(reg_subaddr, subaddr);
328 439
329 /* Start sending address & disable interrupt*/ 440 /* Prepare for async operations */
330 kw_write_reg(reg_ier, 0 /*KW_I2C_IRQ_MASK*/); 441 host->data = data;
442 host->len = len;
443 host->state = state_addr;
444 host->result = 0;
445 host->rw = (addrdir & 1);
446 host->polled = bus->polled;
447
448 /* Enable interrupt if not using polled mode and interrupt is
449 * available
450 */
451 if (use_irq) {
452 /* Clear completion */
453 INIT_COMPLETION(host->complete);
454 /* Ack stale interrupts */
455 kw_write_reg(reg_isr, kw_read_reg(reg_isr));
456 /* Arm timeout */
457 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
458 add_timer(&host->timeout_timer);
459 /* Enable emission */
460 kw_write_reg(reg_ier, KW_I2C_IRQ_MASK);
461 }
462
463 /* Start sending address */
331 kw_write_reg(reg_control, KW_I2C_CTL_XADDR); 464 kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
332 465
333 /* State machine, to turn into an interrupt handler */ 466 /* Wait for completion */
334 while(state != state_idle) { 467 if (use_irq)
335 u8 isr = kw_wait_interrupt(host); 468 wait_for_completion(&host->complete);
336 state = kw_handle_interrupt(host, state, addr & 1, &rc, &data, &len, isr); 469 else {
470 while(host->state != state_idle) {
471 unsigned long flags;
472
473 u8 isr = kw_i2c_wait_interrupt(host);
474 spin_lock_irqsave(&host->lock, flags);
475 kw_i2c_handle_interrupt(host, isr);
476 spin_unlock_irqrestore(&host->lock, flags);
477 }
337 } 478 }
338 479
339 return rc; 480 /* Disable emission */
481 kw_write_reg(reg_ier, 0);
482
483 return host->result;
340} 484}
341 485
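From the code above one can infer (an inference, not spelled out in the patch) that addrdir is the full 8-bit address byte: the 7-bit device address shifted left one, with the read/write direction in bit 0 — kw_i2c_xfer writes addrdir & 0xff to the address register and takes addrdir & 1 as the direction. A standalone check:

	#include <stdio.h>

	int main(void)
	{
		unsigned int dev = 0x50;		/* hypothetical 7-bit address */
		unsigned char rd = (dev << 1) | 1;	/* addrdir for a read */
		unsigned char wr = (dev << 1);		/* addrdir for a write */
		printf("read=0x%02x write=0x%02x\n", rd, wr);	/* 0xa1 0xa0 */
		return 0;
	}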
342static void keywest_low_i2c_add(struct device_node *np) 486static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
343{ 487{
344 struct low_i2c_host *host = find_low_i2c_host(NULL); 488 struct pmac_i2c_host_kw *host;
345 u32 *psteps, *prate, steps, aoffset = 0; 489 u32 *psteps, *prate, *addrp, steps;
346 struct device_node *parent;
347 490
491 host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL);
348 if (host == NULL) { 492 if (host == NULL) {
349 printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", 493 printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
350 np->full_name); 494 np->full_name);
351 return; 495 return NULL;
352 } 496 }
353 memset(host, 0, sizeof(*host));
354 497
498 /* Apple is kind enough to provide a valid AAPL,address property
499 * on all i2c keywest nodes so far ... we would have to fallback
500 * to macio parsing if that wasn't the case
501 */
502 addrp = (u32 *)get_property(np, "AAPL,address", NULL);
503 if (addrp == NULL) {
504 printk(KERN_ERR "low_i2c: Can't find address for %s\n",
505 np->full_name);
506 kfree(host);
507 return NULL;
508 }
355 init_MUTEX(&host->mutex); 509 init_MUTEX(&host->mutex);
356 host->np = of_node_get(np); 510 init_completion(&host->complete);
511 spin_lock_init(&host->lock);
512 init_timer(&host->timeout_timer);
513 host->timeout_timer.function = kw_i2c_timeout;
514 host->timeout_timer.data = (unsigned long)host;
515
357 psteps = (u32 *)get_property(np, "AAPL,address-step", NULL); 516 psteps = (u32 *)get_property(np, "AAPL,address-step", NULL);
358 steps = psteps ? (*psteps) : 0x10; 517 steps = psteps ? (*psteps) : 0x10;
359 for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++) 518 for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
360 steps >>= 1; 519 steps >>= 1;
361 parent = of_get_parent(np);
362 host->num_channels = 1;
363 if (parent && parent->name[0] == 'u') {
364 host->num_channels = 2;
365 aoffset = 3;
366 }
367 /* Select interface rate */ 520 /* Select interface rate */
368 host->speed = KW_I2C_MODE_100KHZ; 521 host->speed = KW_I2C_MODE_25KHZ;
369 prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL); 522 prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL);
370 if (prate) switch(*prate) { 523 if (prate) switch(*prate) {
371 case 100: 524 case 100:
@@ -378,146 +531,981 @@ static void keywest_low_i2c_add(struct device_node *np)
378 host->speed = KW_I2C_MODE_25KHZ; 531 host->speed = KW_I2C_MODE_25KHZ;
379 break; 532 break;
380 } 533 }
534 if (np->n_intrs > 0)
535 host->irq = np->intrs[0].line;
536 else
537 host->irq = NO_IRQ;
538
539 host->base = ioremap((*addrp), 0x1000);
540 if (host->base == NULL) {
541 printk(KERN_ERR "low_i2c: Can't map registers for %s\n",
542 np->full_name);
543 kfree(host);
544 return NULL;
545 }
546
 547 /* Make sure IRQ is disabled */
548 kw_write_reg(reg_ier, 0);
549
550 /* Request chip interrupt */
551 if (request_irq(host->irq, kw_i2c_irq, SA_SHIRQ, "keywest i2c", host))
552 host->irq = NO_IRQ;
553
554 printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
555 *addrp, host->irq, np->full_name);
381 556
382 host->mode = pmac_low_i2c_mode_std; 557 return host;
383 host->base = ioremap(np->addrs[0].address + aoffset,
384 np->addrs[0].size);
385 host->func = keywest_low_i2c_func;
386} 558}
387 559
560
561static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
562 struct device_node *controller,
563 struct device_node *busnode,
564 int channel)
565{
566 struct pmac_i2c_bus *bus;
567
568 bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL);
569 if (bus == NULL)
570 return;
571
572 bus->controller = of_node_get(controller);
573 bus->busnode = of_node_get(busnode);
574 bus->type = pmac_i2c_bus_keywest;
575 bus->hostdata = host;
576 bus->channel = channel;
577 bus->mode = pmac_i2c_mode_std;
578 bus->open = kw_i2c_open;
579 bus->close = kw_i2c_close;
580 bus->xfer = kw_i2c_xfer;
581 init_MUTEX(&bus->sem);
582 if (controller == busnode)
583 bus->flags = pmac_i2c_multibus;
584 list_add(&bus->link, &pmac_i2c_busses);
585
586 printk(KERN_INFO " channel %d bus %s\n", channel,
587 (controller == busnode) ? "<multibus>" : busnode->full_name);
588}
589
590static void __init kw_i2c_probe(void)
591{
592 struct device_node *np, *child, *parent;
593
594 /* Probe keywest-i2c busses */
595 for (np = NULL;
596 (np = of_find_compatible_node(np, "i2c","keywest-i2c")) != NULL;){
597 struct pmac_i2c_host_kw *host;
598 int multibus, chans, i;
599
600 /* Found one, init a host structure */
601 host = kw_i2c_host_init(np);
602 if (host == NULL)
603 continue;
604
605 /* Now check if we have a multibus setup (old style) or if we
606 * have proper bus nodes. Note that the "new" way (proper bus
607 * nodes) might cause us to not create some busses that are
608 * kept hidden in the device-tree. In the future, we might
609 * want to work around that by creating busses without a node
610 * but not for now
611 */
612 child = of_get_next_child(np, NULL);
613 multibus = !child || strcmp(child->name, "i2c-bus");
614 of_node_put(child);
615
616 /* For a multibus setup, we get the bus count based on the
617 * parent type
618 */
619 if (multibus) {
620 parent = of_get_parent(np);
621 if (parent == NULL)
622 continue;
623 chans = parent->name[0] == 'u' ? 2 : 1;
624 for (i = 0; i < chans; i++)
625 kw_i2c_add(host, np, np, i);
626 } else {
627 for (child = NULL;
628 (child = of_get_next_child(np, child)) != NULL;) {
629 u32 *reg =
630 (u32 *)get_property(child, "reg", NULL);
631 if (reg == NULL)
632 continue;
633 kw_i2c_add(host, np, child, *reg);
634 }
635 }
636 }
637}
638
639
388/* 640/*
389 * 641 *
390 * PMU implementation 642 * PMU implementation
391 * 643 *
392 */ 644 */
393 645
394
395#ifdef CONFIG_ADB_PMU 646#ifdef CONFIG_ADB_PMU
396 647
397static int pmu_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len) 648/*
649 * i2c command block to the PMU
650 */
651struct pmu_i2c_hdr {
652 u8 bus;
653 u8 mode;
654 u8 bus2;
655 u8 address;
656 u8 sub_addr;
657 u8 comb_addr;
658 u8 count;
659 u8 data[];
660};
661
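To visualize how this header is used by pmu_i2c_xfer() below (illustrative layout, derived from the nbytes arithmetic in the patch):

	/*
	 * req->data layout for a PMU i2c write of N payload bytes:
	 *
	 *   data[0]           PMU_I2C_CMD
	 *   data[1..7]        struct pmu_i2c_hdr (7 fixed u8 fields)
	 *   data[8..8+N-1]    payload copied from the caller
	 *
	 * hence req->nbytes = sizeof(struct pmu_i2c_hdr) + 1 + N.
	 */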
662static void pmu_i2c_complete(struct adb_request *req)
398{ 663{
399 // TODO 664 complete(req->arg);
400 return -ENODEV;
401} 665}
402 666
403static void pmu_low_i2c_add(struct device_node *np) 667static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
668 u32 subaddr, u8 *data, int len)
404{ 669{
405 struct low_i2c_host *host = find_low_i2c_host(NULL); 670 struct adb_request *req = bus->hostdata;
671 struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1];
672 struct completion comp;
673 int read = addrdir & 1;
674 int retry;
675 int rc = 0;
406 676
407 if (host == NULL) { 677 /* For now, limit ourselves to 16 bytes transfers */
408 printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", 678 if (len > 16)
409 np->full_name); 679 return -EINVAL;
410 return; 680
681 init_completion(&comp);
682
683 for (retry = 0; retry < 16; retry++) {
684 memset(req, 0, sizeof(struct adb_request));
685 hdr->bus = bus->channel;
686 hdr->count = len;
687
688 switch(bus->mode) {
689 case pmac_i2c_mode_std:
690 if (subsize != 0)
691 return -EINVAL;
692 hdr->address = addrdir;
693 hdr->mode = PMU_I2C_MODE_SIMPLE;
694 break;
695 case pmac_i2c_mode_stdsub:
696 case pmac_i2c_mode_combined:
697 if (subsize != 1)
698 return -EINVAL;
699 hdr->address = addrdir & 0xfe;
700 hdr->comb_addr = addrdir;
701 hdr->sub_addr = subaddr;
702 if (bus->mode == pmac_i2c_mode_stdsub)
703 hdr->mode = PMU_I2C_MODE_STDSUB;
704 else
705 hdr->mode = PMU_I2C_MODE_COMBINED;
706 break;
707 default:
708 return -EINVAL;
709 }
710
711 INIT_COMPLETION(comp);
712 req->data[0] = PMU_I2C_CMD;
713 req->reply[0] = 0xff;
714 req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
715 req->done = pmu_i2c_complete;
716 req->arg = &comp;
717 if (!read && len) {
718 memcpy(hdr->data, data, len);
719 req->nbytes += len;
720 }
721 rc = pmu_queue_request(req);
722 if (rc)
723 return rc;
724 wait_for_completion(&comp);
725 if (req->reply[0] == PMU_I2C_STATUS_OK)
726 break;
727 msleep(15);
411 } 728 }
412 memset(host, 0, sizeof(*host)); 729 if (req->reply[0] != PMU_I2C_STATUS_OK)
730 return -EIO;
413 731
414 init_MUTEX(&host->mutex); 732 for (retry = 0; retry < 16; retry++) {
415 host->np = of_node_get(np); 733 memset(req, 0, sizeof(struct adb_request));
416 host->num_channels = 3; 734
417 host->mode = pmac_low_i2c_mode_std; 735 /* I know that looks like a lot, slow as hell, but darwin
418 host->func = pmu_low_i2c_func; 736 * does it so let's be on the safe side for now
737 */
738 msleep(15);
739
740 hdr->bus = PMU_I2C_BUS_STATUS;
741
742 INIT_COMPLETION(comp);
743 req->data[0] = PMU_I2C_CMD;
744 req->reply[0] = 0xff;
745 req->nbytes = 2;
746 req->done = pmu_i2c_complete;
747 req->arg = &comp;
748 rc = pmu_queue_request(req);
749 if (rc)
750 return rc;
751 wait_for_completion(&comp);
752
753 if (req->reply[0] == PMU_I2C_STATUS_OK && !read)
754 return 0;
755 if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) {
756 int rlen = req->reply_len - 1;
757
758 if (rlen != len) {
759 printk(KERN_WARNING "low_i2c: PMU returned %d"
760 " bytes, expected %d !\n", rlen, len);
761 return -EIO;
762 }
763 if (len)
764 memcpy(data, &req->reply[1], len);
765 return 0;
766 }
767 }
768 return -EIO;
769}
770
771static void __init pmu_i2c_probe(void)
772{
773 struct pmac_i2c_bus *bus;
774 struct device_node *busnode;
775 int channel, sz;
776
777 if (!pmu_present())
778 return;
779
780 /* There might or might not be a "pmu-i2c" node, we use that
781 * or via-pmu itself, whatever we find. I haven't seen a machine
782 * with separate bus nodes, so we assume a multibus setup
783 */
784 busnode = of_find_node_by_name(NULL, "pmu-i2c");
785 if (busnode == NULL)
786 busnode = of_find_node_by_name(NULL, "via-pmu");
787 if (busnode == NULL)
788 return;
789
790 printk(KERN_INFO "PMU i2c %s\n", busnode->full_name);
791
792 /*
793 * We add bus 1 and 2 only for now, bus 0 is "special"
794 */
795 for (channel = 1; channel <= 2; channel++) {
796 sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request);
797 bus = kzalloc(sz, GFP_KERNEL);
798 if (bus == NULL)
799 return;
800
801 bus->controller = busnode;
802 bus->busnode = busnode;
803 bus->type = pmac_i2c_bus_pmu;
804 bus->channel = channel;
805 bus->mode = pmac_i2c_mode_std;
806 bus->hostdata = bus + 1;
807 bus->xfer = pmu_i2c_xfer;
808 init_MUTEX(&bus->sem);
809 bus->flags = pmac_i2c_multibus;
810 list_add(&bus->link, &pmac_i2c_busses);
811
812 printk(KERN_INFO " channel %d bus <multibus>\n", channel);
813 }
419} 814}
420 815
421#endif /* CONFIG_ADB_PMU */ 816#endif /* CONFIG_ADB_PMU */
422 817
423void __init pmac_init_low_i2c(void) 818
819/*
820 *
821 * SMU implementation
822 *
823 */
824
825#ifdef CONFIG_PMAC_SMU
826
827static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc)
424{ 828{
425 struct device_node *np; 829 complete(misc);
830}
426 831
427 /* Probe keywest-i2c busses */ 832static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
428 np = of_find_compatible_node(NULL, "i2c", "keywest-i2c"); 833 u32 subaddr, u8 *data, int len)
429 while(np) { 834{
430 keywest_low_i2c_add(np); 835 struct smu_i2c_cmd *cmd = bus->hostdata;
431 np = of_find_compatible_node(np, "i2c", "keywest-i2c"); 836 struct completion comp;
837 int read = addrdir & 1;
838 int rc = 0;
839
840 if ((read && len > SMU_I2C_READ_MAX) ||
841 ((!read) && len > SMU_I2C_WRITE_MAX))
842 return -EINVAL;
843
844 memset(cmd, 0, sizeof(struct smu_i2c_cmd));
845 cmd->info.bus = bus->channel;
846 cmd->info.devaddr = addrdir;
847 cmd->info.datalen = len;
848
849 switch(bus->mode) {
850 case pmac_i2c_mode_std:
851 if (subsize != 0)
852 return -EINVAL;
853 cmd->info.type = SMU_I2C_TRANSFER_SIMPLE;
854 break;
855 case pmac_i2c_mode_stdsub:
856 case pmac_i2c_mode_combined:
857 if (subsize > 3 || subsize < 1)
858 return -EINVAL;
859 cmd->info.sublen = subsize;
860 /* that's big-endian only but heh ! */
861 memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize),
862 subsize);
863 if (bus->mode == pmac_i2c_mode_stdsub)
864 cmd->info.type = SMU_I2C_TRANSFER_STDSUB;
865 else
866 cmd->info.type = SMU_I2C_TRANSFER_COMBINED;
867 break;
868 default:
869 return -EINVAL;
432 } 870 }
871 if (!read && len)
872 memcpy(cmd->info.data, data, len);
873
874 init_completion(&comp);
875 cmd->done = smu_i2c_complete;
876 cmd->misc = &comp;
877 rc = smu_queue_i2c(cmd);
878 if (rc < 0)
879 return rc;
880 wait_for_completion(&comp);
881 rc = cmd->status;
882
883 if (read && len)
884 memcpy(data, cmd->info.data, len);
885 return rc < 0 ? rc : 0;
886}
887
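A standalone check of the sub-address copy above; as the in-code comment says, it relies on big-endian layout, so the expected output below assumes a big-endian host:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int subaddr = 0x00004567;	/* hypothetical 2-byte sub-address */
		int subsize = 2;
		unsigned char sub[4] = { 0 };

		/* same copy as smu_i2c_xfer(): the low `subsize` bytes of the
		 * u32, which sit at the end of the word on a big-endian CPU */
		memcpy(sub, ((char *)&subaddr) + (4 - subsize), subsize);
		printf("%02x %02x\n", sub[0], sub[1]);	/* 45 67 on big-endian */
		return 0;
	}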
888static void __init smu_i2c_probe(void)
889{
890 struct device_node *controller, *busnode;
891 struct pmac_i2c_bus *bus;
892 u32 *reg;
893 int sz;
894
895 if (!smu_present())
896 return;
897
898 controller = of_find_node_by_name(NULL, "smu-i2c-control");
899 if (controller == NULL)
900 controller = of_find_node_by_name(NULL, "smu");
901 if (controller == NULL)
902 return;
903
904 printk(KERN_INFO "SMU i2c %s\n", controller->full_name);
905
 906 /* Look for children; note that they might not be of the right
 907 * type, as older device trees mix i2c busses and other things
 908 * at the same level
909 */
910 for (busnode = NULL;
911 (busnode = of_get_next_child(controller, busnode)) != NULL;) {
912 if (strcmp(busnode->type, "i2c") &&
913 strcmp(busnode->type, "i2c-bus"))
914 continue;
915 reg = (u32 *)get_property(busnode, "reg", NULL);
916 if (reg == NULL)
917 continue;
918
919 sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
920 bus = kzalloc(sz, GFP_KERNEL);
921 if (bus == NULL)
922 return;
923
924 bus->controller = controller;
925 bus->busnode = of_node_get(busnode);
926 bus->type = pmac_i2c_bus_smu;
927 bus->channel = *reg;
928 bus->mode = pmac_i2c_mode_std;
929 bus->hostdata = bus + 1;
930 bus->xfer = smu_i2c_xfer;
931 init_MUTEX(&bus->sem);
932 bus->flags = 0;
933 list_add(&bus->link, &pmac_i2c_busses);
934
935 printk(KERN_INFO " channel %x bus %s\n",
936 bus->channel, busnode->full_name);
937 }
938}
939
940#endif /* CONFIG_PMAC_SMU */
941
942/*
943 *
944 * Core code
945 *
946 */
433 947
434#ifdef CONFIG_ADB_PMU
435 /* Probe PMU busses */
436 np = of_find_node_by_name(NULL, "via-pmu");
437 if (np)
438 pmu_low_i2c_add(np);
439#endif /* CONFIG_ADB_PMU */
440 948
441 /* TODO: Add CUDA support as well */ 949struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node)
950{
951 struct device_node *p = of_node_get(node);
952 struct device_node *prev = NULL;
953 struct pmac_i2c_bus *bus;
954
955 while(p) {
956 list_for_each_entry(bus, &pmac_i2c_busses, link) {
957 if (p == bus->busnode) {
958 if (prev && bus->flags & pmac_i2c_multibus) {
959 u32 *reg;
960 reg = (u32 *)get_property(prev, "reg",
961 NULL);
962 if (!reg)
963 continue;
964 if (((*reg) >> 8) != bus->channel)
965 continue;
966 }
967 of_node_put(p);
968 of_node_put(prev);
969 return bus;
970 }
971 }
972 of_node_put(prev);
973 prev = p;
974 p = of_get_parent(p);
975 }
976 return NULL;
442} 977}
978EXPORT_SYMBOL_GPL(pmac_i2c_find_bus);
979
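The matching above implies (an inference from this function and pmac_i2c_get_dev_addr() below) that on multibus controllers a device's "reg" packs the channel above the low address byte:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg = 0x000001a2;	/* hypothetical "reg" value */
		printf("channel=%u addr=0x%02x\n", reg >> 8, reg & 0xff);
		return 0;			/* channel=1 addr=0xa2 */
	}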
980u8 pmac_i2c_get_dev_addr(struct device_node *device)
981{
982 u32 *reg = (u32 *)get_property(device, "reg", NULL);
983
984 if (reg == NULL)
985 return 0;
986
987 return (*reg) & 0xff;
988}
989EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr);
990
991struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus)
992{
993 return bus->controller;
994}
995EXPORT_SYMBOL_GPL(pmac_i2c_get_controller);
996
997struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus)
998{
999 return bus->busnode;
1000}
1001EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node);
1002
1003int pmac_i2c_get_type(struct pmac_i2c_bus *bus)
1004{
1005 return bus->type;
1006}
1007EXPORT_SYMBOL_GPL(pmac_i2c_get_type);
1008
1009int pmac_i2c_get_flags(struct pmac_i2c_bus *bus)
1010{
1011 return bus->flags;
1012}
1013EXPORT_SYMBOL_GPL(pmac_i2c_get_flags);
1014
1015int pmac_i2c_get_channel(struct pmac_i2c_bus *bus)
1016{
1017 return bus->channel;
1018}
1019EXPORT_SYMBOL_GPL(pmac_i2c_get_channel);
1020
1021
1022void pmac_i2c_attach_adapter(struct pmac_i2c_bus *bus,
1023 struct i2c_adapter *adapter)
1024{
1025 WARN_ON(bus->adapter != NULL);
1026 bus->adapter = adapter;
1027}
1028EXPORT_SYMBOL_GPL(pmac_i2c_attach_adapter);
1029
1030void pmac_i2c_detach_adapter(struct pmac_i2c_bus *bus,
1031 struct i2c_adapter *adapter)
1032{
1033 WARN_ON(bus->adapter != adapter);
1034 bus->adapter = NULL;
1035}
1036EXPORT_SYMBOL_GPL(pmac_i2c_detach_adapter);
1037
1038struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus)
1039{
1040 return bus->adapter;
1041}
1042EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter);
1043
1044struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter)
1045{
1046 struct pmac_i2c_bus *bus;
1047
1048 list_for_each_entry(bus, &pmac_i2c_busses, link)
1049 if (bus->adapter == adapter)
1050 return bus;
1051 return NULL;
1052}
1053EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus);
1054
1055extern int pmac_i2c_match_adapter(struct device_node *dev,
1056 struct i2c_adapter *adapter)
1057{
1058 struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev);
1059
1060 if (bus == NULL)
1061 return 0;
1062 return (bus->adapter == adapter);
1063}
1064EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter);
443 1065
444int pmac_low_i2c_lock(struct device_node *np) 1066int pmac_low_i2c_lock(struct device_node *np)
445{ 1067{
446 struct low_i2c_host *host = find_low_i2c_host(np); 1068 struct pmac_i2c_bus *bus, *found = NULL;
447 1069
448 if (!host) 1070 list_for_each_entry(bus, &pmac_i2c_busses, link) {
1071 if (np == bus->controller) {
1072 found = bus;
1073 break;
1074 }
1075 }
1076 if (!found)
449 return -ENODEV; 1077 return -ENODEV;
450 down(&host->mutex); 1078 return pmac_i2c_open(bus, 0);
451 return 0;
452} 1079}
453EXPORT_SYMBOL(pmac_low_i2c_lock); 1080EXPORT_SYMBOL_GPL(pmac_low_i2c_lock);
454 1081
455int pmac_low_i2c_unlock(struct device_node *np) 1082int pmac_low_i2c_unlock(struct device_node *np)
456{ 1083{
457 struct low_i2c_host *host = find_low_i2c_host(np); 1084 struct pmac_i2c_bus *bus, *found = NULL;
458 1085
459 if (!host) 1086 list_for_each_entry(bus, &pmac_i2c_busses, link) {
1087 if (np == bus->controller) {
1088 found = bus;
1089 break;
1090 }
1091 }
1092 if (!found)
460 return -ENODEV; 1093 return -ENODEV;
461 up(&host->mutex); 1094 pmac_i2c_close(bus);
462 return 0; 1095 return 0;
463} 1096}
464EXPORT_SYMBOL(pmac_low_i2c_unlock); 1097EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock);
465 1098
466 1099
467int pmac_low_i2c_open(struct device_node *np, int channel) 1100int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled)
468{ 1101{
469 struct low_i2c_host *host = find_low_i2c_host(np); 1102 int rc;
1103
1104 down(&bus->sem);
1105 bus->polled = polled || pmac_i2c_force_poll;
1106 bus->opened = 1;
1107 bus->mode = pmac_i2c_mode_std;
1108 if (bus->open && (rc = bus->open(bus)) != 0) {
1109 bus->opened = 0;
1110 up(&bus->sem);
1111 return rc;
1112 }
1113 return 0;
1114}
1115EXPORT_SYMBOL_GPL(pmac_i2c_open);
470 1116
471 if (!host) 1117void pmac_i2c_close(struct pmac_i2c_bus *bus)
472 return -ENODEV; 1118{
1119 WARN_ON(!bus->opened);
1120 if (bus->close)
1121 bus->close(bus);
1122 bus->opened = 0;
1123 up(&bus->sem);
1124}
1125EXPORT_SYMBOL_GPL(pmac_i2c_close);
473 1126
474 if (channel >= host->num_channels) 1127int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode)
1128{
1129 WARN_ON(!bus->opened);
1130
 1131 /* Report it if you see the error below, as there might be a new
1132 * "combined4" mode that I need to implement for the SMU bus
1133 */
1134 if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) {
1135 printk(KERN_ERR "low_i2c: Invalid mode %d requested on"
1136 " bus %s !\n", mode, bus->busnode->full_name);
475 return -EINVAL; 1137 return -EINVAL;
476 1138 }
477 down(&host->mutex); 1139 bus->mode = mode;
478 host->is_open = 1;
479 host->channel = channel;
480 1140
481 return 0; 1141 return 0;
482} 1142}
483EXPORT_SYMBOL(pmac_low_i2c_open); 1143EXPORT_SYMBOL_GPL(pmac_i2c_setmode);
484 1144
485int pmac_low_i2c_close(struct device_node *np) 1145int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
1146 u32 subaddr, u8 *data, int len)
486{ 1147{
487 struct low_i2c_host *host = find_low_i2c_host(np); 1148 int rc;
488 1149
489 if (!host) 1150 WARN_ON(!bus->opened);
490 return -ENODEV;
491 1151
492 host->is_open = 0; 1152 DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x,"
493 up(&host->mutex); 1153 " %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize,
1154 subaddr, len, bus->busnode->full_name);
494 1155
495 return 0; 1156 rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len);
1157
1158#ifdef DEBUG
1159 if (rc)
1160 DBG("xfer error %d\n", rc);
1161#endif
1162 return rc;
496} 1163}
497EXPORT_SYMBOL(pmac_low_i2c_close); 1164EXPORT_SYMBOL_GPL(pmac_i2c_xfer);
1165
1166/* some quirks for platform function decoding */
1167enum {
1168 pmac_i2c_quirk_invmask = 0x00000001u,
1169};
498 1170
499int pmac_low_i2c_setmode(struct device_node *np, int mode) 1171static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
1172 int quirks))
500{ 1173{
501 struct low_i2c_host *host = find_low_i2c_host(np); 1174 struct pmac_i2c_bus *bus;
1175 struct device_node *np;
1176 static struct whitelist_ent {
1177 char *name;
1178 char *compatible;
1179 int quirks;
1180 } whitelist[] = {
 1181 /* XXX Study device-trees & apple drivers to get the quirks
1182 * right !
1183 */
1184 { "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
1185 { "i2c-cpu-voltage", NULL, 0},
1186 { "temp-monitor", NULL, 0 },
1187 { "supply-monitor", NULL, 0 },
1188 { NULL, NULL, 0 },
1189 };
1190
 1191 /* Only some devices need to have platform functions instantiated
 1192 * here. For now, we have a table. Others, like the 9554 i2c GPIOs
 1193 * used on Xserve, will use their own platform function instance
 1194 * if we ever do a driver for them
1195 */
1196 list_for_each_entry(bus, &pmac_i2c_busses, link) {
1197 for (np = NULL;
1198 (np = of_get_next_child(bus->busnode, np)) != NULL;) {
1199 struct whitelist_ent *p;
1200 /* If multibus, check if device is on that bus */
1201 if (bus->flags & pmac_i2c_multibus)
1202 if (bus != pmac_i2c_find_bus(np))
1203 continue;
1204 for (p = whitelist; p->name != NULL; p++) {
1205 if (strcmp(np->name, p->name))
1206 continue;
1207 if (p->compatible &&
1208 !device_is_compatible(np, p->compatible))
1209 continue;
1210 callback(np, p->quirks);
1211 break;
1212 }
1213 }
1214 }
1215}
502 1216
503 if (!host) 1217#define MAX_I2C_DATA 64
504 return -ENODEV; 1218
505 WARN_ON(!host->is_open); 1219struct pmac_i2c_pf_inst
506 host->mode = mode; 1220{
1221 struct pmac_i2c_bus *bus;
1222 u8 addr;
1223 u8 buffer[MAX_I2C_DATA];
1224 u8 scratch[MAX_I2C_DATA];
1225 int bytes;
1226 int quirks;
1227};
1228
1229static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args)
1230{
1231 struct pmac_i2c_pf_inst *inst;
1232 struct pmac_i2c_bus *bus;
1233
1234 bus = pmac_i2c_find_bus(func->node);
1235 if (bus == NULL) {
1236 printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n",
1237 func->node->full_name);
1238 return NULL;
1239 }
1240 if (pmac_i2c_open(bus, 0)) {
1241 printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n",
1242 func->node->full_name);
1243 return NULL;
1244 }
1245
1246 /* XXX might need GFP_ATOMIC when called during the suspend process,
1247 * but then, there are already lots of issues with suspending when
1248 * near OOM that need to be resolved, the allocator itself should
1249 * probably make GFP_NOIO implicit during suspend
1250 */
1251 inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL);
1252 if (inst == NULL) {
1253 pmac_i2c_close(bus);
1254 return NULL;
1255 }
1256 inst->bus = bus;
1257 inst->addr = pmac_i2c_get_dev_addr(func->node);
1258 inst->quirks = (int)(long)func->driver_data;
1259 return inst;
1260}
1261
1262static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
1263{
1264 struct pmac_i2c_pf_inst *inst = instdata;
1265
1266 if (inst == NULL)
1267 return;
1268 pmac_i2c_close(inst->bus);
1269 if (inst)
1270 kfree(inst);
1271}
1272
1273static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
1274{
1275 struct pmac_i2c_pf_inst *inst = instdata;
1276
1277 inst->bytes = len;
1278 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0,
1279 inst->buffer, len);
1280}
1281
1282static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data)
1283{
1284 struct pmac_i2c_pf_inst *inst = instdata;
1285
1286 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
1287 (u8 *)data, len);
1288}
1289
1290/* This function is used to do the masking & OR'ing for the "rmw" type
1291 * callbacks. Ze should apply the mask and OR in the values in the
1292 * buffer before writing back. The problem is that it seems that
1293 * various darwin drivers implement the mask/or differently, thus
1294 * we need to check the quirks first
1295 */
1296static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst,
1297 u32 len, const u8 *mask, const u8 *val)
1298{
1299 int i;
1300
1301 if (inst->quirks & pmac_i2c_quirk_invmask) {
1302 for (i = 0; i < len; i ++)
1303 inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i];
1304 } else {
1305 for (i = 0; i < len; i ++)
1306 inst->scratch[i] = (inst->buffer[i] & ~mask[i])
1307 | (val[i] & mask[i]);
1308 }
1309}
1310
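A standalone numeric check of the two flavours implemented above, with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		unsigned char buf = 0xf0, mask = 0x0f, val = 0x05;

		/* pmac_i2c_quirk_invmask: keep (buf & mask), OR val in as-is */
		unsigned char quirky = (buf & mask) | val;		/* 0x05 */
		/* default: clear the masked bits, OR in (val & mask) */
		unsigned char normal = (buf & ~mask) | (val & mask);	/* 0xf5 */

		printf("quirky=0x%02x normal=0x%02x\n", quirky, normal);
		return 0;
	}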
1311static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen,
1312 u32 totallen, const u8 *maskdata,
1313 const u8 *valuedata)
1314{
1315 struct pmac_i2c_pf_inst *inst = instdata;
1316
1317 if (masklen > inst->bytes || valuelen > inst->bytes ||
1318 totallen > inst->bytes || valuelen > masklen)
1319 return -EINVAL;
1320
1321 pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
1322
1323 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
1324 inst->scratch, totallen);
1325}
1326
1327static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len)
1328{
1329 struct pmac_i2c_pf_inst *inst = instdata;
1330
1331 inst->bytes = len;
1332 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr,
1333 inst->buffer, len);
1334}
1335
1336static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len,
1337 const u8 *data)
1338{
1339 struct pmac_i2c_pf_inst *inst = instdata;
1340
1341 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
1342 subaddr, (u8 *)data, len);
1343}
507 1344
1345static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode)
1346{
1347 struct pmac_i2c_pf_inst *inst = instdata;
1348
1349 return pmac_i2c_setmode(inst->bus, mode);
1350}
1351
1352static int pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen,
1353 u32 valuelen, u32 totallen, const u8 *maskdata,
1354 const u8 *valuedata)
1355{
1356 struct pmac_i2c_pf_inst *inst = instdata;
1357
1358 if (masklen > inst->bytes || valuelen > inst->bytes ||
1359 totallen > inst->bytes || valuelen > masklen)
1360 return -EINVAL;
1361
1362 pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
1363
1364 return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
1365 subaddr, inst->scratch, totallen);
1366}
1367
1368static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len,
1369 const u8 *maskdata,
1370 const u8 *valuedata)
1371{
1372 struct pmac_i2c_pf_inst *inst = instdata;
1373 int i, match;
1374
1375 /* Get return value pointer, it's assumed to be a u32 */
1376 if (!args || !args->count || !args->u[0].p)
1377 return -EINVAL;
1378
1379 /* Check buffer */
1380 if (len > inst->bytes)
1381 return -EINVAL;
1382
1383 for (i = 0, match = 1; match && i < len; i ++)
1384 if ((inst->buffer[i] & maskdata[i]) != valuedata[i])
1385 match = 0;
1386 *args->u[0].p = match;
508 return 0; 1387 return 0;
509} 1388}
510EXPORT_SYMBOL(pmac_low_i2c_setmode);
511 1389
512int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len) 1390static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration)
513{ 1391{
514 struct low_i2c_host *host = find_low_i2c_host(np); 1392 msleep((duration + 999) / 1000);
1393 return 0;
1394}
515 1395
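The rounding above is a ceiling division by 1000, which suggests (an assumption; the unit isn't stated in the patch) that duration arrives in microseconds and is rounded up to whole milliseconds:

	#include <stdio.h>

	int main(void)
	{
		unsigned int duration = 2500;	/* hypothetical duration */
		printf("msleep(%u)\n", (duration + 999) / 1000);	/* msleep(3) */
		return 0;
	}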
516 if (!host)
517 return -ENODEV;
518 WARN_ON(!host->is_open);
519 1396
520 return host->func(host, addrdir, subaddr, data, len); 1397static struct pmf_handlers pmac_i2c_pfunc_handlers = {
1398 .begin = pmac_i2c_do_begin,
1399 .end = pmac_i2c_do_end,
1400 .read_i2c = pmac_i2c_do_read,
1401 .write_i2c = pmac_i2c_do_write,
1402 .rmw_i2c = pmac_i2c_do_rmw,
1403 .read_i2c_sub = pmac_i2c_do_read_sub,
1404 .write_i2c_sub = pmac_i2c_do_write_sub,
1405 .rmw_i2c_sub = pmac_i2c_do_rmw_sub,
1406 .set_i2c_mode = pmac_i2c_do_set_mode,
1407 .mask_and_compare = pmac_i2c_do_mask_and_comp,
1408 .delay = pmac_i2c_do_delay,
1409};
1410
1411static void __init pmac_i2c_dev_create(struct device_node *np, int quirks)
1412{
1413 DBG("dev_create(%s)\n", np->full_name);
1414
1415 pmf_register_driver(np, &pmac_i2c_pfunc_handlers,
1416 (void *)(long)quirks);
1417}
1418
1419static void __init pmac_i2c_dev_init(struct device_node *np, int quirks)
1420{
 1421 DBG("dev_init(%s)\n", np->full_name);
1422
1423 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
1424}
1425
1426static void pmac_i2c_dev_suspend(struct device_node *np, int quirks)
1427{
1428 DBG("dev_suspend(%s)\n", np->full_name);
1429 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL);
1430}
1431
1432static void pmac_i2c_dev_resume(struct device_node *np, int quirks)
1433{
1434 DBG("dev_resume(%s)\n", np->full_name);
1435 pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL);
1436}
1437
1438void pmac_pfunc_i2c_suspend(void)
1439{
1440 pmac_i2c_devscan(pmac_i2c_dev_suspend);
1441}
1442
1443void pmac_pfunc_i2c_resume(void)
1444{
1445 pmac_i2c_devscan(pmac_i2c_dev_resume);
1446}
1447
1448/*
1449 * Initialize us: probe all i2c busses on the machine, instantiate
1450 * busses and platform functions as needed.
1451 */
1452/* This is non-static as it might be called early by smp code */
1453int __init pmac_i2c_init(void)
1454{
1455 static int i2c_inited;
1456
1457 if (i2c_inited)
1458 return 0;
1459 i2c_inited = 1;
1460
1461 /* Probe keywest-i2c busses */
1462 kw_i2c_probe();
1463
1464#ifdef CONFIG_ADB_PMU
1465 /* Probe PMU i2c busses */
1466 pmu_i2c_probe();
1467#endif
1468
1469#ifdef CONFIG_PMAC_SMU
1470 /* Probe SMU i2c busses */
1471 smu_i2c_probe();
1472#endif
1473
 1474 /* Now add platform functions for some known devices */
1475 pmac_i2c_devscan(pmac_i2c_dev_create);
1476
1477 return 0;
521} 1478}
522EXPORT_SYMBOL(pmac_low_i2c_xfer); 1479arch_initcall(pmac_i2c_init);
1480
1481/* Since pmac_i2c_init can be called too early for the platform device
1482 * registration, we need to do it at a later time. In our case, subsys
1483 * happens to fit well, though I agree it's a bit of a hack...
1484 */
1485static int __init pmac_i2c_create_platform_devices(void)
1486{
1487 struct pmac_i2c_bus *bus;
1488 int i = 0;
1489
1490 /* In the case where we are initialized from smp_init(), we must
1491 * not use the timer (and thus the irq). It's safe from now on
1492 * though
1493 */
1494 pmac_i2c_force_poll = 0;
1495
1496 /* Create platform devices */
1497 list_for_each_entry(bus, &pmac_i2c_busses, link) {
1498 bus->platform_dev =
1499 platform_device_alloc("i2c-powermac", i++);
1500 if (bus->platform_dev == NULL)
1501 return -ENOMEM;
1502 bus->platform_dev->dev.platform_data = bus;
1503 platform_device_add(bus->platform_dev);
1504 }
1505
1506 /* Now call platform "init" functions */
1507 pmac_i2c_devscan(pmac_i2c_dev_init);
523 1508
1509 return 0;
1510}
1511subsys_initcall(pmac_i2c_create_platform_devices);
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 4042e2f06ee..3ebd045a335 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -514,7 +514,7 @@ static void core99_nvram_sync(void)
514#endif 514#endif
515} 515}
516 516
517static int __init core99_nvram_setup(struct device_node *dp) 517static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
518{ 518{
519 int i; 519 int i;
520 u32 gen_bank0, gen_bank1; 520 u32 gen_bank0, gen_bank1;
@@ -528,7 +528,7 @@ static int __init core99_nvram_setup(struct device_node *dp)
528 printk(KERN_ERR "nvram: can't allocate ram image\n"); 528 printk(KERN_ERR "nvram: can't allocate ram image\n");
529 return -ENOMEM; 529 return -ENOMEM;
530 } 530 }
531 nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2); 531 nvram_data = ioremap(addr, NVRAM_SIZE*2);
532 nvram_naddrs = 1; /* Make sure we get the correct case */ 532 nvram_naddrs = 1; /* Make sure we get the correct case */
533 533
534 DBG("nvram: Checking bank 0...\n"); 534 DBG("nvram: Checking bank 0...\n");
@@ -549,6 +549,7 @@ static int __init core99_nvram_setup(struct device_node *dp)
549 ppc_md.nvram_write = core99_nvram_write; 549 ppc_md.nvram_write = core99_nvram_write;
550 ppc_md.nvram_size = core99_nvram_size; 550 ppc_md.nvram_size = core99_nvram_size;
551 ppc_md.nvram_sync = core99_nvram_sync; 551 ppc_md.nvram_sync = core99_nvram_sync;
552 ppc_md.machine_shutdown = core99_nvram_sync;
552 /* 553 /*
553 * Maybe we could be smarter here though making an exclusive list 554 * Maybe we could be smarter here though making an exclusive list
554 * of known flash chips is a bit nasty as older OF didn't provide us 555 * of known flash chips is a bit nasty as older OF didn't provide us
@@ -569,34 +570,48 @@ static int __init core99_nvram_setup(struct device_node *dp)
569int __init pmac_nvram_init(void) 570int __init pmac_nvram_init(void)
570{ 571{
571 struct device_node *dp; 572 struct device_node *dp;
573 struct resource r1, r2;
574 unsigned int s1 = 0, s2 = 0;
572 int err = 0; 575 int err = 0;
573 576
574 nvram_naddrs = 0; 577 nvram_naddrs = 0;
575 578
576 dp = find_devices("nvram"); 579 dp = of_find_node_by_name(NULL, "nvram");
577 if (dp == NULL) { 580 if (dp == NULL) {
578 printk(KERN_ERR "Can't find NVRAM device\n"); 581 printk(KERN_ERR "Can't find NVRAM device\n");
579 return -ENODEV; 582 return -ENODEV;
580 } 583 }
581 nvram_naddrs = dp->n_addrs; 584
585 /* Try to obtain an address */
586 if (of_address_to_resource(dp, 0, &r1) == 0) {
587 nvram_naddrs = 1;
588 s1 = (r1.end - r1.start) + 1;
589 if (of_address_to_resource(dp, 1, &r2) == 0) {
590 nvram_naddrs = 2;
591 s2 = (r2.end - r2.start) + 1;
592 }
593 }
594
582 is_core_99 = device_is_compatible(dp, "nvram,flash"); 595 is_core_99 = device_is_compatible(dp, "nvram,flash");
583 if (is_core_99) 596 if (is_core_99) {
584 err = core99_nvram_setup(dp); 597 err = core99_nvram_setup(dp, r1.start);
598 goto bail;
599 }
600
585#ifdef CONFIG_PPC32 601#ifdef CONFIG_PPC32
586 else if (_machine == _MACH_chrp && nvram_naddrs == 1) { 602 if (_machine == _MACH_chrp && nvram_naddrs == 1) {
587 nvram_data = ioremap(dp->addrs[0].address + isa_mem_base, 603 nvram_data = ioremap(r1.start, s1);
588 dp->addrs[0].size);
589 nvram_mult = 1; 604 nvram_mult = 1;
590 ppc_md.nvram_read_val = direct_nvram_read_byte; 605 ppc_md.nvram_read_val = direct_nvram_read_byte;
591 ppc_md.nvram_write_val = direct_nvram_write_byte; 606 ppc_md.nvram_write_val = direct_nvram_write_byte;
592 } else if (nvram_naddrs == 1) { 607 } else if (nvram_naddrs == 1) {
593 nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size); 608 nvram_data = ioremap(r1.start, s1);
594 nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE; 609 nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
595 ppc_md.nvram_read_val = direct_nvram_read_byte; 610 ppc_md.nvram_read_val = direct_nvram_read_byte;
596 ppc_md.nvram_write_val = direct_nvram_write_byte; 611 ppc_md.nvram_write_val = direct_nvram_write_byte;
597 } else if (nvram_naddrs == 2) { 612 } else if (nvram_naddrs == 2) {
598 nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size); 613 nvram_addr = ioremap(r1.start, s1);
599 nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size); 614 nvram_data = ioremap(r2.start, s2);
600 ppc_md.nvram_read_val = indirect_nvram_read_byte; 615 ppc_md.nvram_read_val = indirect_nvram_read_byte;
601 ppc_md.nvram_write_val = indirect_nvram_write_byte; 616 ppc_md.nvram_write_val = indirect_nvram_write_byte;
602 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) { 617 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
@@ -605,13 +620,15 @@ int __init pmac_nvram_init(void)
605 ppc_md.nvram_read_val = pmu_nvram_read_byte; 620 ppc_md.nvram_read_val = pmu_nvram_read_byte;
606 ppc_md.nvram_write_val = pmu_nvram_write_byte; 621 ppc_md.nvram_write_val = pmu_nvram_write_byte;
607#endif /* CONFIG_ADB_PMU */ 622#endif /* CONFIG_ADB_PMU */
608 } 623 } else {
609#endif
610 else {
611 printk(KERN_ERR "Incompatible type of NVRAM\n"); 624 printk(KERN_ERR "Incompatible type of NVRAM\n");
612 return -ENXIO; 625 err = -ENXIO;
613 } 626 }
614 lookup_partitions(); 627#endif /* CONFIG_PPC32 */
628bail:
629 of_node_put(dp);
630 if (err == 0)
631 lookup_partitions();
615 return err; 632 return err;
616} 633}
617 634
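A quick standalone check of the nvram_mult ceiling division in the hunk above (the sizes are examples; the real NVRAM_SIZE comes from the pmac headers):

	#include <stdio.h>

	int main(void)
	{
		unsigned int s1 = 0x6000;		/* example resource size */
		unsigned int nvram_size = 0x2000;	/* example NVRAM_SIZE */
		printf("nvram_mult=%u\n", (s1 + nvram_size - 1) / nvram_size);
		return 0;				/* nvram_mult=3 */
	}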
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 443be526cde..f671ed25390 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Support for PCI bridges found on Power Macintoshes. 2 * Support for PCI bridges found on Power Macintoshes.
3 * 3 *
4 * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org) 4 * Copyright (C) 2003-2005 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
5 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) 5 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -25,7 +25,7 @@
25#include <asm/pmac_feature.h> 25#include <asm/pmac_feature.h>
26#include <asm/grackle.h> 26#include <asm/grackle.h>
27#ifdef CONFIG_PPC64 27#ifdef CONFIG_PPC64
28#include <asm/iommu.h> 28//#include <asm/iommu.h>
29#include <asm/ppc-pci.h> 29#include <asm/ppc-pci.h>
30#endif 30#endif
31 31
@@ -44,6 +44,7 @@ static int add_bridge(struct device_node *dev);
44static int has_uninorth; 44static int has_uninorth;
45#ifdef CONFIG_PPC64 45#ifdef CONFIG_PPC64
46static struct pci_controller *u3_agp; 46static struct pci_controller *u3_agp;
47static struct pci_controller *u4_pcie;
47static struct pci_controller *u3_ht; 48static struct pci_controller *u3_ht;
48#endif /* CONFIG_PPC64 */ 49#endif /* CONFIG_PPC64 */
49 50
@@ -97,11 +98,8 @@ static void __init fixup_bus_range(struct device_node *bridge)
97 98
98 /* Lookup the "bus-range" property for the hose */ 99 /* Lookup the "bus-range" property for the hose */
99 bus_range = (int *) get_property(bridge, "bus-range", &len); 100 bus_range = (int *) get_property(bridge, "bus-range", &len);
100 if (bus_range == NULL || len < 2 * sizeof(int)) { 101 if (bus_range == NULL || len < 2 * sizeof(int))
101 printk(KERN_WARNING "Can't get bus-range for %s\n",
102 bridge->full_name);
103 return; 102 return;
104 }
105 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); 103 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
106} 104}
107 105
@@ -128,14 +126,14 @@ static void __init fixup_bus_range(struct device_node *bridge)
128 */ 126 */
129 127
130#define MACRISC_CFA0(devfn, off) \ 128#define MACRISC_CFA0(devfn, off) \
131 ((1 << (unsigned long)PCI_SLOT(dev_fn)) \ 129 ((1 << (unsigned int)PCI_SLOT(dev_fn)) \
132 | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \ 130 | (((unsigned int)PCI_FUNC(dev_fn)) << 8) \
133 | (((unsigned long)(off)) & 0xFCUL)) 131 | (((unsigned int)(off)) & 0xFCUL))
134 132
135#define MACRISC_CFA1(bus, devfn, off) \ 133#define MACRISC_CFA1(bus, devfn, off) \
136 ((((unsigned long)(bus)) << 16) \ 134 ((((unsigned int)(bus)) << 16) \
137 |(((unsigned long)(devfn)) << 8) \ 135 |(((unsigned int)(devfn)) << 8) \
138 |(((unsigned long)(off)) & 0xFCUL) \ 136 |(((unsigned int)(off)) & 0xFCUL) \
139 |1UL) 137 |1UL)
140 138
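To make the two encodings concrete (standalone, with made-up device numbers): CFA0 is the type-0 cycle with a one-hot slot bit, CFA1 the type-1 cycle with an explicit bus number and bit 0 set:

	#include <stdio.h>

	#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)	((devfn) & 0x07)

	int main(void)
	{
		unsigned int dev_fn = (3 << 3) | 1;	/* slot 3, function 1 */
		unsigned int off = 0x40, bus = 2;

		unsigned int cfa0 = (1u << PCI_SLOT(dev_fn))
				  | (PCI_FUNC(dev_fn) << 8)
				  | (off & 0xFC);
		unsigned int cfa1 = (bus << 16) | (dev_fn << 8)
				  | (off & 0xFC) | 1;

		printf("CFA0=0x%08x CFA1=0x%08x\n", cfa0, cfa1);
		return 0;	/* CFA0=0x00000148 CFA1=0x00021941 */
	}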
141static unsigned long macrisc_cfg_access(struct pci_controller* hose, 139static unsigned long macrisc_cfg_access(struct pci_controller* hose,
@@ -168,7 +166,8 @@ static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
168 hose = pci_bus_to_host(bus); 166 hose = pci_bus_to_host(bus);
169 if (hose == NULL) 167 if (hose == NULL)
170 return PCIBIOS_DEVICE_NOT_FOUND; 168 return PCIBIOS_DEVICE_NOT_FOUND;
171 169 if (offset >= 0x100)
170 return PCIBIOS_BAD_REGISTER_NUMBER;
172 addr = macrisc_cfg_access(hose, bus->number, devfn, offset); 171 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
173 if (!addr) 172 if (!addr)
174 return PCIBIOS_DEVICE_NOT_FOUND; 173 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -199,7 +198,8 @@ static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
199 hose = pci_bus_to_host(bus); 198 hose = pci_bus_to_host(bus);
200 if (hose == NULL) 199 if (hose == NULL)
201 return PCIBIOS_DEVICE_NOT_FOUND; 200 return PCIBIOS_DEVICE_NOT_FOUND;
202 201 if (offset >= 0x100)
202 return PCIBIOS_BAD_REGISTER_NUMBER;
203 addr = macrisc_cfg_access(hose, bus->number, devfn, offset); 203 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
204 if (!addr) 204 if (!addr)
205 return PCIBIOS_DEVICE_NOT_FOUND; 205 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -234,12 +234,13 @@ static struct pci_ops macrisc_pci_ops =
234/* 234/*
235 * Verify that a specific (bus, dev_fn) exists on chaos 235 * Verify that a specific (bus, dev_fn) exists on chaos
236 */ 236 */
237static int 237static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
238chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
239{ 238{
240 struct device_node *np; 239 struct device_node *np;
241 u32 *vendor, *device; 240 u32 *vendor, *device;
242 241
242 if (offset >= 0x100)
243 return PCIBIOS_BAD_REGISTER_NUMBER;
243 np = pci_busdev_to_OF_node(bus, devfn); 244 np = pci_busdev_to_OF_node(bus, devfn);
244 if (np == NULL) 245 if (np == NULL)
245 return PCIBIOS_DEVICE_NOT_FOUND; 246 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -285,15 +286,13 @@ static struct pci_ops chaos_pci_ops =
285}; 286};
286 287
287static void __init setup_chaos(struct pci_controller *hose, 288static void __init setup_chaos(struct pci_controller *hose,
288 struct reg_property *addr) 289 struct resource *addr)
289{ 290{
290 /* assume a `chaos' bridge */ 291 /* assume a `chaos' bridge */
291 hose->ops = &chaos_pci_ops; 292 hose->ops = &chaos_pci_ops;
292 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); 293 hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
293 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); 294 hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
294} 295}
295#else
296#define setup_chaos(hose, addr)
297#endif /* CONFIG_PPC32 */ 296#endif /* CONFIG_PPC32 */
298 297
299#ifdef CONFIG_PPC64 298#ifdef CONFIG_PPC64
@@ -326,7 +325,7 @@ static int u3_ht_skip_device(struct pci_controller *hose,
326 else 325 else
327 busdn = hose->arch_data; 326 busdn = hose->arch_data;
328 for (dn = busdn->child; dn; dn = dn->sibling) 327 for (dn = busdn->child; dn; dn = dn->sibling)
329 if (dn->data && PCI_DN(dn)->devfn == devfn) 328 if (PCI_DN(dn) && PCI_DN(dn)->devfn == devfn)
330 break; 329 break;
331 if (dn == NULL) 330 if (dn == NULL)
332 return -1; 331 return -1;
@@ -343,10 +342,10 @@ static int u3_ht_skip_device(struct pci_controller *hose,
343} 342}
344 343
345#define U3_HT_CFA0(devfn, off) \ 344#define U3_HT_CFA0(devfn, off) \
 346 ((((unsigned long)devfn) << 8) | offset) 345 ((((unsigned int)devfn) << 8) | (off))
347#define U3_HT_CFA1(bus, devfn, off) \ 346#define U3_HT_CFA1(bus, devfn, off) \
348 (U3_HT_CFA0(devfn, off) \ 347 (U3_HT_CFA0(devfn, off) \
349 + (((unsigned long)bus) << 16) \ 348 + (((unsigned int)bus) << 16) \
350 + 0x01000000UL) 349 + 0x01000000UL)
351 350
352static unsigned long u3_ht_cfg_access(struct pci_controller* hose, 351static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
@@ -356,9 +355,11 @@ static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
356 /* For now, we don't self probe U3 HT bridge */ 355 /* For now, we don't self probe U3 HT bridge */
357 if (PCI_SLOT(devfn) == 0) 356 if (PCI_SLOT(devfn) == 0)
358 return 0; 357 return 0;
359 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset); 358 return ((unsigned long)hose->cfg_data) +
359 U3_HT_CFA0(devfn, offset);
360 } else 360 } else
361 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset); 361 return ((unsigned long)hose->cfg_data) +
362 U3_HT_CFA1(bus, devfn, offset);
362} 363}
363 364
364static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, 365static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
@@ -370,7 +371,8 @@ static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
370 hose = pci_bus_to_host(bus); 371 hose = pci_bus_to_host(bus);
371 if (hose == NULL) 372 if (hose == NULL)
372 return PCIBIOS_DEVICE_NOT_FOUND; 373 return PCIBIOS_DEVICE_NOT_FOUND;
373 374 if (offset >= 0x100)
375 return PCIBIOS_BAD_REGISTER_NUMBER;
374 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); 376 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
375 if (!addr) 377 if (!addr)
376 return PCIBIOS_DEVICE_NOT_FOUND; 378 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -419,7 +421,8 @@ static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
419 hose = pci_bus_to_host(bus); 421 hose = pci_bus_to_host(bus);
420 if (hose == NULL) 422 if (hose == NULL)
421 return PCIBIOS_DEVICE_NOT_FOUND; 423 return PCIBIOS_DEVICE_NOT_FOUND;
422 424 if (offset >= 0x100)
425 return PCIBIOS_BAD_REGISTER_NUMBER;
423 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); 426 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
424 if (!addr) 427 if (!addr)
425 return PCIBIOS_DEVICE_NOT_FOUND; 428 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -459,6 +462,112 @@ static struct pci_ops u3_ht_pci_ops =
459 u3_ht_read_config, 462 u3_ht_read_config,
460 u3_ht_write_config 463 u3_ht_write_config
461}; 464};
465
466#define U4_PCIE_CFA0(devfn, off) \
 467 ((1 << ((unsigned int)PCI_SLOT(devfn))) \
 468 | (((unsigned int)PCI_FUNC(devfn)) << 8) \
469 | ((((unsigned int)(off)) >> 8) << 28) \
470 | (((unsigned int)(off)) & 0xfcU))
471
472#define U4_PCIE_CFA1(bus, devfn, off) \
473 ((((unsigned int)(bus)) << 16) \
474 |(((unsigned int)(devfn)) << 8) \
475 | ((((unsigned int)(off)) >> 8) << 28) \
476 |(((unsigned int)(off)) & 0xfcU) \
477 |1UL)
478
479static unsigned long u4_pcie_cfg_access(struct pci_controller* hose,
480 u8 bus, u8 dev_fn, int offset)
481{
482 unsigned int caddr;
483
 484 if (bus == hose->first_busno)
 485 caddr = U4_PCIE_CFA0(dev_fn, offset);
 486 else
 487 caddr = U4_PCIE_CFA1(bus, dev_fn, offset);
488
489 /* Uninorth will return garbage if we don't read back the value ! */
490 do {
491 out_le32(hose->cfg_addr, caddr);
492 } while (in_le32(hose->cfg_addr) != caddr);
493
494 offset &= 0x03;
495 return ((unsigned long)hose->cfg_data) + offset;
496}
497
498static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
499 int offset, int len, u32 *val)
500{
501 struct pci_controller *hose;
502 unsigned long addr;
503
504 hose = pci_bus_to_host(bus);
505 if (hose == NULL)
506 return PCIBIOS_DEVICE_NOT_FOUND;
507 if (offset >= 0x1000)
508 return PCIBIOS_BAD_REGISTER_NUMBER;
509 addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
510 if (!addr)
511 return PCIBIOS_DEVICE_NOT_FOUND;
512 /*
513 * Note: the caller has already checked that offset is
514 * suitably aligned and that len is 1, 2 or 4.
515 */
516 switch (len) {
517 case 1:
518 *val = in_8((u8 *)addr);
519 break;
520 case 2:
521 *val = in_le16((u16 *)addr);
522 break;
523 default:
524 *val = in_le32((u32 *)addr);
525 break;
526 }
527 return PCIBIOS_SUCCESSFUL;
528}
529
530static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
531 int offset, int len, u32 val)
532{
533 struct pci_controller *hose;
534 unsigned long addr;
535
536 hose = pci_bus_to_host(bus);
537 if (hose == NULL)
538 return PCIBIOS_DEVICE_NOT_FOUND;
539 if (offset >= 0x1000)
540 return PCIBIOS_BAD_REGISTER_NUMBER;
541 addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
542 if (!addr)
543 return PCIBIOS_DEVICE_NOT_FOUND;
544 /*
545 * Note: the caller has already checked that offset is
546 * suitably aligned and that len is 1, 2 or 4.
547 */
548 switch (len) {
549 case 1:
550 out_8((u8 *)addr, val);
551 (void) in_8((u8 *)addr);
552 break;
553 case 2:
554 out_le16((u16 *)addr, val);
555 (void) in_le16((u16 *)addr);
556 break;
557 default:
558 out_le32((u32 *)addr, val);
559 (void) in_le32((u32 *)addr);
560 break;
561 }
562 return PCIBIOS_SUCCESSFUL;
563}
564
565static struct pci_ops u4_pcie_pci_ops =
566{
567 u4_pcie_read_config,
568 u4_pcie_write_config
569};
570
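As an aside (an illustrative sketch, not part of the patch): the U4_PCIE_CFA0/CFA1 macros route bits 8-11 of the register offset into bits 28-31 of the config address, which is how the 4KB PCI Express extended configuration space is reached through a classic indirect address/data pair. For a hypothetical offset 0x140 on slot 0, function 0:

	caddr = (1 << 0)			/* slot 0 select */
	      | (0 << 8)			/* function 0 */
	      | ((0x140 >> 8) << 28)		/* extended offset bits -> 0x10000000 */
	      | (0x140 & 0xfc);			/* dword-aligned low bits -> 0x40 */
	/* caddr == 0x10000041; the low two offset bits then select the byte
	 * within the 4-byte cfg_data window, as u4_pcie_cfg_access() does. */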
462#endif /* CONFIG_PPC64 */ 571#endif /* CONFIG_PPC64 */
463 572
464#ifdef CONFIG_PPC32 573#ifdef CONFIG_PPC32
@@ -532,7 +641,8 @@ static void __init init_p2pbridge(void)
532 } 641 }
533 if (early_read_config_word(hose, bus, devfn, 642 if (early_read_config_word(hose, bus, devfn,
534 PCI_BRIDGE_CONTROL, &val) < 0) { 643 PCI_BRIDGE_CONTROL, &val) < 0) {
535 printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n"); 644 printk(KERN_ERR "init_p2pbridge: couldn't read bridge"
645 " control\n");
536 return; 646 return;
537 } 647 }
538 val &= ~PCI_BRIDGE_CTL_MASTER_ABORT; 648 val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
@@ -576,36 +686,38 @@ static void __init fixup_nec_usb2(void)
576 continue; 686 continue;
577 early_read_config_dword(hose, bus, devfn, 0xe4, &data); 687 early_read_config_dword(hose, bus, devfn, 0xe4, &data);
578 if (data & 1UL) { 688 if (data & 1UL) {
579 printk("Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n"); 689 printk("Found NEC PD720100A USB2 chip with disabled"
690 " EHCI, fixing up...\n");
580 data &= ~1UL; 691 data &= ~1UL;
581 early_write_config_dword(hose, bus, devfn, 0xe4, data); 692 early_write_config_dword(hose, bus, devfn, 0xe4, data);
582 early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE, 693 early_write_config_byte(hose, bus,
694 devfn | 2, PCI_INTERRUPT_LINE,
583 nec->intrs[0].line); 695 nec->intrs[0].line);
584 } 696 }
585 } 697 }
586} 698}
587 699
588static void __init setup_bandit(struct pci_controller *hose, 700static void __init setup_bandit(struct pci_controller *hose,
589 struct reg_property *addr) 701 struct resource *addr)
590{ 702{
591 hose->ops = &macrisc_pci_ops; 703 hose->ops = &macrisc_pci_ops;
592 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); 704 hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
593 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); 705 hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
594 init_bandit(hose); 706 init_bandit(hose);
595} 707}
596 708
597static int __init setup_uninorth(struct pci_controller *hose, 709static int __init setup_uninorth(struct pci_controller *hose,
598 struct reg_property *addr) 710 struct resource *addr)
599{ 711{
600 pci_assign_all_buses = 1; 712 pci_assign_all_buses = 1;
601 has_uninorth = 1; 713 has_uninorth = 1;
602 hose->ops = &macrisc_pci_ops; 714 hose->ops = &macrisc_pci_ops;
603 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); 715 hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
604 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); 716 hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
605 /* We "know" that the bridge at f2000000 has the PCI slots. */ 717 /* We "know" that the bridge at f2000000 has the PCI slots. */
606 return addr->address == 0xf2000000; 718 return addr->start == 0xf2000000;
607} 719}
608#endif 720#endif /* CONFIG_PPC32 */
609 721
610#ifdef CONFIG_PPC64 722#ifdef CONFIG_PPC64
611static void __init setup_u3_agp(struct pci_controller* hose) 723static void __init setup_u3_agp(struct pci_controller* hose)
@@ -625,15 +737,36 @@ static void __init setup_u3_agp(struct pci_controller* hose)
625 hose->ops = &macrisc_pci_ops; 737 hose->ops = &macrisc_pci_ops;
626 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); 738 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
627 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); 739 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
628
629 u3_agp = hose; 740 u3_agp = hose;
630} 741}
631 742
743static void __init setup_u4_pcie(struct pci_controller* hose)
744{
745 /* We currently only implement the "non-atomic" config space, to
746 * be optimised later.
747 */
748 hose->ops = &u4_pcie_pci_ops;
749 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
750 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
751
752 /* The bus contains a bridge from root -> device, we need to
753 * make it visible on bus 0 so that we pick the right type
754 * of config cycles. If we didn't, we would have to force all
755 * config cycles to be type 1. So we override the "bus-range"
756 * property here
757 */
758 hose->first_busno = 0x00;
759 hose->last_busno = 0xff;
760 u4_pcie = hose;
761}
762
632static void __init setup_u3_ht(struct pci_controller* hose) 763static void __init setup_u3_ht(struct pci_controller* hose)
633{ 764{
634 struct device_node *np = (struct device_node *)hose->arch_data; 765 struct device_node *np = (struct device_node *)hose->arch_data;
766 struct pci_controller *other = NULL;
635 int i, cur; 767 int i, cur;
636 768
769
637 hose->ops = &u3_ht_pci_ops; 770 hose->ops = &u3_ht_pci_ops;
638 771
639 /* We hard code the address because of the different size of 772 /* We hard code the address because of the different size of
@@ -667,11 +800,20 @@ static void __init setup_u3_ht(struct pci_controller* hose)
667 800
668 u3_ht = hose; 801 u3_ht = hose;
669 802
670 if (u3_agp == NULL) { 803 if (u3_agp != NULL)
671 DBG("U3 has no AGP, using full resource range\n"); 804 other = u3_agp;
805 else if (u4_pcie != NULL)
806 other = u4_pcie;
807
808 if (other == NULL) {
809 DBG("U3/4 has no AGP/PCIE, using full resource range\n");
672 return; 810 return;
673 } 811 }
674 812
813 /* Fixup bus range vs. PCIE */
814 if (u4_pcie)
815 hose->last_busno = u4_pcie->first_busno - 1;
816
675 /* We "remove" the AGP resources from the resources allocated to HT, 817 /* We "remove" the AGP resources from the resources allocated to HT,
676 * that is we create "holes". However, that code does assumptions 818 * that is we create "holes". However, that code does assumptions
677 * that so far happen to be true (cross fingers...), typically that 819 * that so far happen to be true (cross fingers...), typically that
@@ -679,7 +821,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
679 */ 821 */
680 cur = 0; 822 cur = 0;
681 for (i=0; i<3; i++) { 823 for (i=0; i<3; i++) {
682 struct resource *res = &u3_agp->mem_resources[i]; 824 struct resource *res = &other->mem_resources[i];
683 if (res->flags != IORESOURCE_MEM) 825 if (res->flags != IORESOURCE_MEM)
684 continue; 826 continue;
685 /* We don't care about "fine" resources */ 827 /* We don't care about "fine" resources */
@@ -722,7 +864,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
722 hose->mem_resources[cur-1].end = res->start - 1; 864 hose->mem_resources[cur-1].end = res->start - 1;
723 } 865 }
724} 866}
725#endif 867#endif /* CONFIG_PPC64 */
726 868
727/* 869/*
728 * We assume that if we have a G3 powermac, we have one bridge called 870 * We assume that if we have a G3 powermac, we have one bridge called
@@ -733,24 +875,17 @@ static int __init add_bridge(struct device_node *dev)
733{ 875{
734 int len; 876 int len;
735 struct pci_controller *hose; 877 struct pci_controller *hose;
736#ifdef CONFIG_PPC32 878 struct resource rsrc;
737 struct reg_property *addr;
738#endif
739 char *disp_name; 879 char *disp_name;
740 int *bus_range; 880 int *bus_range;
741 int primary = 1; 881 int primary = 1, has_address = 0;
742 882
743 DBG("Adding PCI host bridge %s\n", dev->full_name); 883 DBG("Adding PCI host bridge %s\n", dev->full_name);
744 884
745#ifdef CONFIG_PPC32 885 /* Fetch host bridge registers address */
746 /* XXX fix this */ 886 has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
747 addr = (struct reg_property *) get_property(dev, "reg", &len); 887
748 if (addr == NULL || len < sizeof(*addr)) { 888 /* Get bus range if any */
749 printk(KERN_WARNING "Can't use %s: no address\n",
750 dev->full_name);
751 return -ENODEV;
752 }
753#endif
754 bus_range = (int *) get_property(dev, "bus-range", &len); 889 bus_range = (int *) get_property(dev, "bus-range", &len);
755 if (bus_range == NULL || len < 2 * sizeof(int)) { 890 if (bus_range == NULL || len < 2 * sizeof(int)) {
756 printk(KERN_WARNING "Can't get bus-range for %s, assume" 891 printk(KERN_WARNING "Can't get bus-range for %s, assume"
@@ -770,6 +905,8 @@ static int __init add_bridge(struct device_node *dev)
770 hose->last_busno = bus_range ? bus_range[1] : 0xff; 905 hose->last_busno = bus_range ? bus_range[1] : 0xff;
771 906
772 disp_name = NULL; 907 disp_name = NULL;
908
909 /* 64 bits only bridges */
773#ifdef CONFIG_PPC64 910#ifdef CONFIG_PPC64
774 if (device_is_compatible(dev, "u3-agp")) { 911 if (device_is_compatible(dev, "u3-agp")) {
775 setup_u3_agp(hose); 912 setup_u3_agp(hose);
@@ -779,28 +916,37 @@ static int __init add_bridge(struct device_node *dev)
779 setup_u3_ht(hose); 916 setup_u3_ht(hose);
780 disp_name = "U3-HT"; 917 disp_name = "U3-HT";
781 primary = 1; 918 primary = 1;
919 } else if (device_is_compatible(dev, "u4-pcie")) {
920 setup_u4_pcie(hose);
921 disp_name = "U4-PCIE";
922 primary = 0;
782 } 923 }
783 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n", 924 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number:"
784 disp_name, hose->first_busno, hose->last_busno); 925 " %d->%d\n", disp_name, hose->first_busno, hose->last_busno);
785#else 926#endif /* CONFIG_PPC64 */
927
928 /* 32 bits only bridges */
929#ifdef CONFIG_PPC32
786 if (device_is_compatible(dev, "uni-north")) { 930 if (device_is_compatible(dev, "uni-north")) {
787 primary = setup_uninorth(hose, addr); 931 primary = setup_uninorth(hose, &rsrc);
788 disp_name = "UniNorth"; 932 disp_name = "UniNorth";
789 } else if (strcmp(dev->name, "pci") == 0) { 933 } else if (strcmp(dev->name, "pci") == 0) {
790 /* XXX assume this is a mpc106 (grackle) */ 934 /* XXX assume this is a mpc106 (grackle) */
791 setup_grackle(hose); 935 setup_grackle(hose);
792 disp_name = "Grackle (MPC106)"; 936 disp_name = "Grackle (MPC106)";
793 } else if (strcmp(dev->name, "bandit") == 0) { 937 } else if (strcmp(dev->name, "bandit") == 0) {
794 setup_bandit(hose, addr); 938 setup_bandit(hose, &rsrc);
795 disp_name = "Bandit"; 939 disp_name = "Bandit";
796 } else if (strcmp(dev->name, "chaos") == 0) { 940 } else if (strcmp(dev->name, "chaos") == 0) {
797 setup_chaos(hose, addr); 941 setup_chaos(hose, &rsrc);
798 disp_name = "Chaos"; 942 disp_name = "Chaos";
799 primary = 0; 943 primary = 0;
800 } 944 }
801 printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n", 945 printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. "
802 disp_name, addr->address, hose->first_busno, hose->last_busno); 946 "Firmware bus number: %d->%d\n",
803#endif 947 disp_name, rsrc.start, hose->first_busno, hose->last_busno);
948#endif /* CONFIG_PPC32 */
949
804 DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", 950 DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
805 hose, hose->cfg_addr, hose->cfg_data); 951 hose, hose->cfg_addr, hose->cfg_data);
806 952
@@ -814,8 +960,7 @@ static int __init add_bridge(struct device_node *dev)
814 return 0; 960 return 0;
815} 961}
816 962
817static void __init 963static void __init pcibios_fixup_OF_interrupts(void)
818pcibios_fixup_OF_interrupts(void)
819{ 964{
820 struct pci_dev* dev = NULL; 965 struct pci_dev* dev = NULL;
821 966
@@ -835,8 +980,7 @@ pcibios_fixup_OF_interrupts(void)
835 } 980 }
836} 981}
837 982
838void __init 983void __init pmac_pcibios_fixup(void)
839pmac_pcibios_fixup(void)
840{ 984{
841 /* Fixup interrupts according to OF tree */ 985 /* Fixup interrupts according to OF tree */
842 pcibios_fixup_OF_interrupts(); 986 pcibios_fixup_OF_interrupts();
@@ -899,6 +1043,8 @@ void __init pmac_pci_init(void)
899 pci_setup_phb_io(u3_ht, 1); 1043 pci_setup_phb_io(u3_ht, 1);
900 if (u3_agp) 1044 if (u3_agp)
901 pci_setup_phb_io(u3_agp, 0); 1045 pci_setup_phb_io(u3_agp, 0);
1046 if (u4_pcie)
1047 pci_setup_phb_io(u4_pcie, 0);
902 1048
903 /* 1049 /*
904 * On ppc64, fixup the IO resources on our host bridges as 1050 * On ppc64, fixup the IO resources on our host bridges as
@@ -911,7 +1057,8 @@ void __init pmac_pci_init(void)
911 1057
912 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We 1058 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
913 * assume there is no P2P bridge on the AGP bus, which should be a 1059 * assume there is no P2P bridge on the AGP bus, which should be a
 914 * safe assumptions hopefully. 1060 * safe assumption for now. We should do something better in the
 1061 * future, though.
915 */ 1062 */
916 if (u3_agp) { 1063 if (u3_agp) {
917 struct device_node *np = u3_agp->arch_data; 1064 struct device_node *np = u3_agp->arch_data;
@@ -919,7 +1066,6 @@ void __init pmac_pci_init(void)
919 for (np = np->child; np; np = np->sibling) 1066 for (np = np->child; np; np = np->sibling)
920 PCI_DN(np)->busno = 0xf0; 1067 PCI_DN(np)->busno = 0xf0;
921 } 1068 }
922
923 /* pmac_check_ht_link(); */ 1069 /* pmac_check_ht_link(); */
924 1070
925 /* Tell pci.c to not use the common resource allocation mechanism */ 1071 /* Tell pci.c to not use the common resource allocation mechanism */
@@ -1126,7 +1272,8 @@ void pmac_pci_fixup_pciata(struct pci_dev* dev)
1126 good: 1272 good:
1127 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); 1273 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1128 if ((progif & 5) != 5) { 1274 if ((progif & 5) != 5) {
1129 printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev)); 1275 printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n",
1276 pci_name(dev));
1130 (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5); 1277 (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
1131 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) || 1278 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
1132 (progif & 5) != 5) 1279 (progif & 5) != 5)
@@ -1152,7 +1299,8 @@ static void fixup_k2_sata(struct pci_dev* dev)
1152 for (i = 0; i < 6; i++) { 1299 for (i = 0; i < 6; i++) {
1153 dev->resource[i].start = dev->resource[i].end = 0; 1300 dev->resource[i].start = dev->resource[i].end = 0;
1154 dev->resource[i].flags = 0; 1301 dev->resource[i].flags = 0;
1155 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0); 1302 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
1303 0);
1156 } 1304 }
1157 } else { 1305 } else {
1158 pci_read_config_word(dev, PCI_COMMAND, &cmd); 1306 pci_read_config_word(dev, PCI_COMMAND, &cmd);
@@ -1161,7 +1309,8 @@ static void fixup_k2_sata(struct pci_dev* dev)
1161 for (i = 0; i < 5; i++) { 1309 for (i = 0; i < 5; i++) {
1162 dev->resource[i].start = dev->resource[i].end = 0; 1310 dev->resource[i].start = dev->resource[i].end = 0;
1163 dev->resource[i].flags = 0; 1311 dev->resource[i].flags = 0;
1164 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0); 1312 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
1313 0);
1165 } 1314 }
1166 } 1315 }
1167} 1316}
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
new file mode 100644
index 00000000000..4ffd2a9832a
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -0,0 +1,405 @@
1#include <linux/config.h>
2#include <linux/types.h>
3#include <linux/init.h>
4#include <linux/delay.h>
5#include <linux/kernel.h>
6#include <linux/interrupt.h>
7#include <linux/spinlock.h>
8
9#include <asm/pmac_feature.h>
10#include <asm/pmac_pfunc.h>
11
12#define DBG(fmt...) printk(fmt)
13
14static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs)
15{
16 pmf_do_irq(data);
17
18 return IRQ_HANDLED;
19}
20
21static int macio_do_gpio_irq_enable(struct pmf_function *func)
22{
23 if (func->node->n_intrs < 1)
24 return -EINVAL;
25
26 return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0,
27 func->node->name, func);
28}
29
30static int macio_do_gpio_irq_disable(struct pmf_function *func)
31{
32 if (func->node->n_intrs < 1)
33 return -EINVAL;
34
35 free_irq(func->node->intrs[0].line, func);
36 return 0;
37}
38
39static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
40{
41 u8 __iomem *addr = (u8 __iomem *)func->driver_data;
42 unsigned long flags;
43 u8 tmp;
44
45 /* Check polarity */
46 if (args && args->count && !args->u[0].v)
47 value = ~value;
48
49 /* Toggle the GPIO */
50 spin_lock_irqsave(&feature_lock, flags);
51 tmp = readb(addr);
52 tmp = (tmp & ~mask) | (value & mask);
53 DBG("Do write 0x%02x to GPIO %s (%p)\n",
54 tmp, func->node->full_name, addr);
55 writeb(tmp, addr);
56 spin_unlock_irqrestore(&feature_lock, flags);
57
58 return 0;
59}
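For illustration, a hypothetical caller (the function name and values are invented): passing a single argument whose value is 0 selects the inverted, active-low polarity handled by the check above.

	struct pmf_args args = { .count = 1 };

	args.u[0].v = 0;	/* 0 = invert the value before writing */
	pmf_do_functions(macio->of_node, "example-gpio-func", 0,
			 PMF_FLAGS_ON_DEMAND, &args);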
60
61static int macio_do_gpio_read(PMF_STD_ARGS, u8 mask, int rshift, u8 xor)
62{
63 u8 __iomem *addr = (u8 __iomem *)func->driver_data;
64 u32 value;
65
66 /* Check if we have room for reply */
67 if (args == NULL || args->count == 0 || args->u[0].p == NULL)
68 return -EINVAL;
69
70 value = readb(addr);
71 *args->u[0].p = ((value & mask) >> rshift) ^ xor;
72
73 return 0;
74}
75
76static int macio_do_delay(PMF_STD_ARGS, u32 duration)
77{
 78 /* duration is in microseconds; assume we can sleep and round up to ms */
79 msleep((duration + 999) / 1000);
80 return 0;
81}
82
83static struct pmf_handlers macio_gpio_handlers = {
84 .irq_enable = macio_do_gpio_irq_enable,
85 .irq_disable = macio_do_gpio_irq_disable,
86 .write_gpio = macio_do_gpio_write,
87 .read_gpio = macio_do_gpio_read,
88 .delay = macio_do_delay,
89};
90
91static void macio_gpio_init_one(struct macio_chip *macio)
92{
93 struct device_node *gparent, *gp;
94
95 /*
96 * Find the "gpio" parent node
97 */
98
99 for (gparent = NULL;
100 (gparent = of_get_next_child(macio->of_node, gparent)) != NULL;)
101 if (strcmp(gparent->name, "gpio") == 0)
102 break;
103 if (gparent == NULL)
104 return;
105
106 DBG("Installing GPIO functions for macio %s\n",
107 macio->of_node->full_name);
108
109 /*
 110 * Ok, got one, we don't need anything special to track them down, so
111 * we just create them all
112 */
113 for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) {
114 u32 *reg = (u32 *)get_property(gp, "reg", NULL);
115 unsigned long offset;
116 if (reg == NULL)
117 continue;
118 offset = *reg;
 119 /* Deal with old-style device trees. We can safely hard-code the
 120 * offset for now, even if it's a bit gross ...
121 */
122 if (offset < 0x50)
123 offset += 0x50;
124 offset += (unsigned long)macio->base;
125 pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset);
126 }
127
128 DBG("Calling initial GPIO functions for macio %s\n",
129 macio->of_node->full_name);
130
131 /* And now we run all the init ones */
132 for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;)
133 pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
134
135 /* Note: We do not at this point implement the "at sleep" or "at wake"
 136 * functions. I have yet to find any for GPIOs anyway
137 */
138}
139
140static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
141{
142 struct macio_chip *macio = func->driver_data;
143 unsigned long flags;
144
145 spin_lock_irqsave(&feature_lock, flags);
146 MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask));
147 spin_unlock_irqrestore(&feature_lock, flags);
148 return 0;
149}
150
151static int macio_do_read_reg32(PMF_STD_ARGS, u32 offset)
152{
153 struct macio_chip *macio = func->driver_data;
154
155 /* Check if we have room for reply */
156 if (args == NULL || args->count == 0 || args->u[0].p == NULL)
157 return -EINVAL;
158
159 *args->u[0].p = MACIO_IN32(offset);
160 return 0;
161}
162
163static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask)
164{
165 struct macio_chip *macio = func->driver_data;
166 unsigned long flags;
167
168 spin_lock_irqsave(&feature_lock, flags);
169 MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
170 spin_unlock_irqrestore(&feature_lock, flags);
171 return 0;
172}
173
174static int macio_do_read_reg8(PMF_STD_ARGS, u32 offset)
175{
176 struct macio_chip *macio = func->driver_data;
177
178 /* Check if we have room for reply */
179 if (args == NULL || args->count == 0 || args->u[0].p == NULL)
180 return -EINVAL;
181
182 *((u8 *)(args->u[0].p)) = MACIO_IN8(offset);
183 return 0;
184}
185
186static int macio_do_read_reg32_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
187 u32 shift, u32 xor)
188{
189 struct macio_chip *macio = func->driver_data;
190
191 /* Check if we have room for reply */
192 if (args == NULL || args->count == 0 || args->u[0].p == NULL)
193 return -EINVAL;
194
195 *args->u[0].p = ((MACIO_IN32(offset) & mask) >> shift) ^ xor;
196 return 0;
197}
198
199static int macio_do_read_reg8_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
200 u32 shift, u32 xor)
201{
202 struct macio_chip *macio = func->driver_data;
203
204 /* Check if we have room for reply */
205 if (args == NULL || args->count == 0 || args->u[0].p == NULL)
206 return -EINVAL;
207
208 *((u8 *)(args->u[0].p)) = ((MACIO_IN8(offset) & mask) >> shift) ^ xor;
209 return 0;
210}
211
212static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift,
213 u32 mask)
214{
215 struct macio_chip *macio = func->driver_data;
216 unsigned long flags;
217 u32 tmp, val;
218
219 /* Check args */
220 if (args == NULL || args->count == 0)
221 return -EINVAL;
222
223 spin_lock_irqsave(&feature_lock, flags);
224 tmp = MACIO_IN32(offset);
225 val = args->u[0].v << shift;
226 tmp = (tmp & ~mask) | (val & mask);
227 MACIO_OUT32(offset, tmp);
228 spin_unlock_irqrestore(&feature_lock, flags);
229 return 0;
230}
231
232static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
233 u32 mask)
234{
235 struct macio_chip *macio = func->driver_data;
236 unsigned long flags;
237 u32 tmp, val;
238
239 /* Check args */
240 if (args == NULL || args->count == 0)
241 return -EINVAL;
242
243 spin_lock_irqsave(&feature_lock, flags);
244 tmp = MACIO_IN8(offset);
245 val = args->u[0].v << shift;
246 tmp = (tmp & ~mask) | (val & mask);
247 MACIO_OUT8(offset, tmp);
248 spin_unlock_irqrestore(&feature_lock, flags);
249 return 0;
250}
251
252static struct pmf_handlers macio_mmio_handlers = {
253 .write_reg32 = macio_do_write_reg32,
254 .read_reg32 = macio_do_read_reg32,
255 .write_reg8 = macio_do_write_reg8,
 256 .read_reg8 = macio_do_read_reg8,
257 .read_reg32_msrx = macio_do_read_reg32_msrx,
258 .read_reg8_msrx = macio_do_read_reg8_msrx,
259 .write_reg32_slm = macio_do_write_reg32_slm,
260 .write_reg8_slm = macio_do_write_reg8_slm,
261 .delay = macio_do_delay,
262};
263
264static void macio_mmio_init_one(struct macio_chip *macio)
265{
266 DBG("Installing MMIO functions for macio %s\n",
267 macio->of_node->full_name);
268
269 pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio);
270}
271
272static struct device_node *unin_hwclock;
273
274static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
275{
276 unsigned long flags;
277
278 spin_lock_irqsave(&feature_lock, flags);
 279 /* This is fairly bogus in Darwin, but implemented this way it
 280 * should work for our needs:
281 */
282 UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
283 spin_unlock_irqrestore(&feature_lock, flags);
284 return 0;
285}
286
287
288static struct pmf_handlers unin_mmio_handlers = {
289 .write_reg32 = unin_do_write_reg32,
290 .delay = macio_do_delay,
291};
292
293static void uninorth_install_pfunc(void)
294{
295 struct device_node *np;
296
297 DBG("Installing functions for UniN %s\n",
298 uninorth_node->full_name);
299
300 /*
301 * Install handlers for the bridge itself
302 */
303 pmf_register_driver(uninorth_node, &unin_mmio_handlers, NULL);
304 pmf_do_functions(uninorth_node, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
305
306
307 /*
308 * Install handlers for the hwclock child if any
309 */
310 for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
311 if (strcmp(np->name, "hw-clock") == 0) {
312 unin_hwclock = np;
313 break;
314 }
315 if (unin_hwclock) {
316 DBG("Installing functions for UniN clock %s\n",
317 unin_hwclock->full_name);
318 pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL);
319 pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT,
320 NULL);
321 }
322}
323
324/* We export this as the SMP code might init us early */
325int __init pmac_pfunc_base_install(void)
326{
327 static int pfbase_inited;
328 int i;
329
330 if (pfbase_inited)
331 return 0;
332 pfbase_inited = 1;
333
334
335 DBG("Installing base platform functions...\n");
336
337 /*
338 * Locate mac-io chips and install handlers
339 */
340 for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
341 if (macio_chips[i].of_node) {
342 macio_mmio_init_one(&macio_chips[i]);
343 macio_gpio_init_one(&macio_chips[i]);
344 }
345 }
346
347 /*
348 * Install handlers for northbridge and direct mapped hwclock
349 * if any. We do not implement the config space access callback
350 * which is only ever used for functions that we do not call in
351 * the current driver (enabling/disabling cells in U2, mostly used
352 * to restore the PCI settings, we do that differently)
353 */
354 if (uninorth_node && uninorth_base)
355 uninorth_install_pfunc();
356
357 DBG("All base functions installed\n");
358
359 return 0;
360}
361
362arch_initcall(pmac_pfunc_base_install);
363
364#ifdef CONFIG_PM
365
366/* Those can be called by pmac_feature. Ultimately, I should use a sysdev
367 * or a device, but for now, that's good enough until I sort out some
 368 * ordering issues. Also, we do not bother with GPIOs, as so far I have yet
369 * to see a case where a GPIO function has the on-suspend or on-resume bit
370 */
371void pmac_pfunc_base_suspend(void)
372{
373 int i;
374
375 for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
376 if (macio_chips[i].of_node)
377 pmf_do_functions(macio_chips[i].of_node, NULL, 0,
378 PMF_FLAGS_ON_SLEEP, NULL);
379 }
380 if (uninorth_node)
381 pmf_do_functions(uninorth_node, NULL, 0,
382 PMF_FLAGS_ON_SLEEP, NULL);
383 if (unin_hwclock)
384 pmf_do_functions(unin_hwclock, NULL, 0,
385 PMF_FLAGS_ON_SLEEP, NULL);
386}
387
388void pmac_pfunc_base_resume(void)
389{
390 int i;
391
392 if (unin_hwclock)
393 pmf_do_functions(unin_hwclock, NULL, 0,
394 PMF_FLAGS_ON_WAKE, NULL);
395 if (uninorth_node)
396 pmf_do_functions(uninorth_node, NULL, 0,
397 PMF_FLAGS_ON_WAKE, NULL);
398 for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
399 if (macio_chips[i].of_node)
400 pmf_do_functions(macio_chips[i].of_node, NULL, 0,
401 PMF_FLAGS_ON_WAKE, NULL);
402 }
403}
404
405#endif /* CONFIG_PM */
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
new file mode 100644
index 00000000000..c32c623001d
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -0,0 +1,989 @@
1/*
2 *
3 * FIXME: Properly make this race free with refcounting etc...
4 *
5 * FIXME: LOCKING !!!
6 */
7
8#include <linux/config.h>
9#include <linux/init.h>
10#include <linux/delay.h>
11#include <linux/kernel.h>
12#include <linux/spinlock.h>
13#include <linux/module.h>
14
15#include <asm/semaphore.h>
16#include <asm/prom.h>
17#include <asm/pmac_pfunc.h>
18
19/* Debug */
20#define LOG_PARSE(fmt...)
21#define LOG_ERROR(fmt...) printk(fmt)
22#define LOG_BLOB(t,b,c)
23#define DBG(fmt...) printk(fmt)
24
25/* Command numbers */
26#define PMF_CMD_LIST 0
27#define PMF_CMD_WRITE_GPIO 1
28#define PMF_CMD_READ_GPIO 2
29#define PMF_CMD_WRITE_REG32 3
30#define PMF_CMD_READ_REG32 4
31#define PMF_CMD_WRITE_REG16 5
32#define PMF_CMD_READ_REG16 6
33#define PMF_CMD_WRITE_REG8 7
34#define PMF_CMD_READ_REG8 8
35#define PMF_CMD_DELAY 9
36#define PMF_CMD_WAIT_REG32 10
37#define PMF_CMD_WAIT_REG16 11
38#define PMF_CMD_WAIT_REG8 12
39#define PMF_CMD_READ_I2C 13
40#define PMF_CMD_WRITE_I2C 14
41#define PMF_CMD_RMW_I2C 15
42#define PMF_CMD_GEN_I2C 16
43#define PMF_CMD_SHIFT_BYTES_RIGHT 17
44#define PMF_CMD_SHIFT_BYTES_LEFT 18
45#define PMF_CMD_READ_CFG 19
46#define PMF_CMD_WRITE_CFG 20
47#define PMF_CMD_RMW_CFG 21
48#define PMF_CMD_READ_I2C_SUBADDR 22
49#define PMF_CMD_WRITE_I2C_SUBADDR 23
50#define PMF_CMD_SET_I2C_MODE 24
51#define PMF_CMD_RMW_I2C_SUBADDR 25
52#define PMF_CMD_READ_REG32_MASK_SHR_XOR 26
53#define PMF_CMD_READ_REG16_MASK_SHR_XOR 27
54#define PMF_CMD_READ_REG8_MASK_SHR_XOR 28
55#define PMF_CMD_WRITE_REG32_SHL_MASK 29
56#define PMF_CMD_WRITE_REG16_SHL_MASK 30
57#define PMF_CMD_WRITE_REG8_SHL_MASK 31
58#define PMF_CMD_MASK_AND_COMPARE 32
59#define PMF_CMD_COUNT 33
60
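To make the encoding concrete, here is a hypothetical "platform-do-example" property laid out as pmf_add_function_prop() further down parses it (the phandle and register values are invented): two header words, then a stream of command words.

	u32 blob[] = {
		0x00001234,		/* [0] phandle of the target node */
		PMF_FLAGS_ON_DEMAND,	/* [1] function flags */
		PMF_CMD_WRITE_REG32,	/* command code 3 */
		0x00000040,		/* offset */
		0x00000001,		/* value */
		0x00000001,		/* mask */
	};

A leading command code of 0 (PMF_CMD_LIST) instead means a subcommand count follows, grouping several commands into one function; pmf_parse_one() handles that case.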
61/* This structure holds the state of the parser while walking through
62 * a function definition
63 */
64struct pmf_cmd {
65 const void *cmdptr;
66 const void *cmdend;
67 struct pmf_function *func;
68 void *instdata;
69 struct pmf_args *args;
70 int error;
71};
72
73#if 0
74/* Debug output */
75static void print_blob(const char *title, const void *blob, int bytes)
76{
77 printk("%s", title);
78 while(bytes--) {
79 printk("%02x ", *((u8 *)blob));
80 blob += 1;
81 }
82 printk("\n");
83}
84#endif
85
86/*
87 * Parser helpers
88 */
89
90static u32 pmf_next32(struct pmf_cmd *cmd)
91{
92 u32 value;
93 if ((cmd->cmdend - cmd->cmdptr) < 4) {
94 cmd->error = 1;
95 return 0;
96 }
97 value = *((u32 *)cmd->cmdptr);
98 cmd->cmdptr += 4;
99 return value;
100}
101
102static const void* pmf_next_blob(struct pmf_cmd *cmd, int count)
103{
104 const void *value;
105 if ((cmd->cmdend - cmd->cmdptr) < count) {
106 cmd->error = 1;
107 return NULL;
108 }
109 value = cmd->cmdptr;
110 cmd->cmdptr += count;
111 return value;
112}
113
114/*
115 * Individual command parsers
116 */
117
118#define PMF_PARSE_CALL(name, cmd, handlers, p...) \
119 do { \
120 if (cmd->error) \
121 return -ENXIO; \
122 if (handlers == NULL) \
123 return 0; \
124 if (handlers->name) \
125 return handlers->name(cmd->func, cmd->instdata, \
126 cmd->args, p); \
127 return -1; \
 128 } while(0)
129
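For reference, a sketch of what PMF_PARSE_CALL(delay, cmd, h, duration) expands to, so each parser both validates its operands and dispatches in one step:

	if (cmd->error)
		return -ENXIO;		/* ran past the end of the blob */
	if (h == NULL)
		return 0;		/* parse-only pass, nothing to call */
	if (h->delay)
		return h->delay(cmd->func, cmd->instdata, cmd->args, duration);
	return -1;			/* no handler installed */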
130
131static int pmf_parser_write_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
132{
133 u8 value = (u8)pmf_next32(cmd);
134 u8 mask = (u8)pmf_next32(cmd);
135
136 LOG_PARSE("pmf: write_gpio(value: %02x, mask: %02x)\n", value, mask);
137
138 PMF_PARSE_CALL(write_gpio, cmd, h, value, mask);
139}
140
141static int pmf_parser_read_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
142{
143 u8 mask = (u8)pmf_next32(cmd);
144 int rshift = (int)pmf_next32(cmd);
145 u8 xor = (u8)pmf_next32(cmd);
146
147 LOG_PARSE("pmf: read_gpio(mask: %02x, rshift: %d, xor: %02x)\n",
148 mask, rshift, xor);
149
150 PMF_PARSE_CALL(read_gpio, cmd, h, mask, rshift, xor);
151}
152
153static int pmf_parser_write_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
154{
155 u32 offset = pmf_next32(cmd);
156 u32 value = pmf_next32(cmd);
157 u32 mask = pmf_next32(cmd);
158
159 LOG_PARSE("pmf: write_reg32(offset: %08x, value: %08x, mask: %08x)\n",
160 offset, value, mask);
161
162 PMF_PARSE_CALL(write_reg32, cmd, h, offset, value, mask);
163}
164
165static int pmf_parser_read_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
166{
167 u32 offset = pmf_next32(cmd);
168
169 LOG_PARSE("pmf: read_reg32(offset: %08x)\n", offset);
170
171 PMF_PARSE_CALL(read_reg32, cmd, h, offset);
172}
173
174
175static int pmf_parser_write_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
176{
177 u32 offset = pmf_next32(cmd);
178 u16 value = (u16)pmf_next32(cmd);
179 u16 mask = (u16)pmf_next32(cmd);
180
181 LOG_PARSE("pmf: write_reg16(offset: %08x, value: %04x, mask: %04x)\n",
182 offset, value, mask);
183
184 PMF_PARSE_CALL(write_reg16, cmd, h, offset, value, mask);
185}
186
187static int pmf_parser_read_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
188{
189 u32 offset = pmf_next32(cmd);
190
191 LOG_PARSE("pmf: read_reg16(offset: %08x)\n", offset);
192
193 PMF_PARSE_CALL(read_reg16, cmd, h, offset);
194}
195
196
197static int pmf_parser_write_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
198{
199 u32 offset = pmf_next32(cmd);
 200 u8 value = (u8)pmf_next32(cmd);
 201 u8 mask = (u8)pmf_next32(cmd);
202
203 LOG_PARSE("pmf: write_reg8(offset: %08x, value: %02x, mask: %02x)\n",
204 offset, value, mask);
205
206 PMF_PARSE_CALL(write_reg8, cmd, h, offset, value, mask);
207}
208
209static int pmf_parser_read_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
210{
211 u32 offset = pmf_next32(cmd);
212
213 LOG_PARSE("pmf: read_reg8(offset: %08x)\n", offset);
214
215 PMF_PARSE_CALL(read_reg8, cmd, h, offset);
216}
217
218static int pmf_parser_delay(struct pmf_cmd *cmd, struct pmf_handlers *h)
219{
220 u32 duration = pmf_next32(cmd);
221
222 LOG_PARSE("pmf: delay(duration: %d us)\n", duration);
223
224 PMF_PARSE_CALL(delay, cmd, h, duration);
225}
226
227static int pmf_parser_wait_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
228{
229 u32 offset = pmf_next32(cmd);
230 u32 value = pmf_next32(cmd);
231 u32 mask = pmf_next32(cmd);
232
 233 LOG_PARSE("pmf: wait_reg32(offset: %08x, comp_value: %08x, mask: %08x)\n",
234 offset, value, mask);
235
236 PMF_PARSE_CALL(wait_reg32, cmd, h, offset, value, mask);
237}
238
239static int pmf_parser_wait_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
240{
241 u32 offset = pmf_next32(cmd);
242 u16 value = (u16)pmf_next32(cmd);
243 u16 mask = (u16)pmf_next32(cmd);
244
 245 LOG_PARSE("pmf: wait_reg16(offset: %08x, comp_value: %04x, mask: %04x)\n",
246 offset, value, mask);
247
248 PMF_PARSE_CALL(wait_reg16, cmd, h, offset, value, mask);
249}
250
251static int pmf_parser_wait_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
252{
253 u32 offset = pmf_next32(cmd);
254 u8 value = (u8)pmf_next32(cmd);
255 u8 mask = (u8)pmf_next32(cmd);
256
 257 LOG_PARSE("pmf: wait_reg8(offset: %08x, comp_value: %02x, mask: %02x)\n",
258 offset, value, mask);
259
260 PMF_PARSE_CALL(wait_reg8, cmd, h, offset, value, mask);
261}
262
263static int pmf_parser_read_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
264{
265 u32 bytes = pmf_next32(cmd);
266
 267 LOG_PARSE("pmf: read_i2c(bytes: %u)\n", bytes);
268
269 PMF_PARSE_CALL(read_i2c, cmd, h, bytes);
270}
271
272static int pmf_parser_write_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
273{
274 u32 bytes = pmf_next32(cmd);
275 const void *blob = pmf_next_blob(cmd, bytes);
276
 277 LOG_PARSE("pmf: write_i2c(bytes: %u) ...\n", bytes);
278 LOG_BLOB("pmf: data: \n", blob, bytes);
279
280 PMF_PARSE_CALL(write_i2c, cmd, h, bytes, blob);
281}
282
283
284static int pmf_parser_rmw_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
285{
286 u32 maskbytes = pmf_next32(cmd);
287 u32 valuesbytes = pmf_next32(cmd);
288 u32 totalbytes = pmf_next32(cmd);
289 const void *maskblob = pmf_next_blob(cmd, maskbytes);
290 const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
291
 292 LOG_PARSE("pmf: rmw_i2c(maskbytes: %u, valuebytes: %u, "
293 "totalbytes: %d) ...\n",
294 maskbytes, valuesbytes, totalbytes);
295 LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
296 LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
297
298 PMF_PARSE_CALL(rmw_i2c, cmd, h, maskbytes, valuesbytes, totalbytes,
299 maskblob, valuesblob);
300}
301
302static int pmf_parser_read_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
303{
304 u32 offset = pmf_next32(cmd);
305 u32 bytes = pmf_next32(cmd);
306
 307 LOG_PARSE("pmf: read_cfg(offset: %x, bytes: %u)\n", offset, bytes);
308
309 PMF_PARSE_CALL(read_cfg, cmd, h, offset, bytes);
310}
311
312
313static int pmf_parser_write_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
314{
315 u32 offset = pmf_next32(cmd);
316 u32 bytes = pmf_next32(cmd);
317 const void *blob = pmf_next_blob(cmd, bytes);
318
 319 LOG_PARSE("pmf: write_cfg(offset: %x, bytes: %u)\n", offset, bytes);
320 LOG_BLOB("pmf: data: \n", blob, bytes);
321
322 PMF_PARSE_CALL(write_cfg, cmd, h, offset, bytes, blob);
323}
324
325static int pmf_parser_rmw_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
326{
327 u32 offset = pmf_next32(cmd);
328 u32 maskbytes = pmf_next32(cmd);
329 u32 valuesbytes = pmf_next32(cmd);
330 u32 totalbytes = pmf_next32(cmd);
331 const void *maskblob = pmf_next_blob(cmd, maskbytes);
332 const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
333
 334 LOG_PARSE("pmf: rmw_cfg(maskbytes: %u, valuebytes: %u,"
335 " totalbytes: %d) ...\n",
336 maskbytes, valuesbytes, totalbytes);
337 LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
338 LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
339
340 PMF_PARSE_CALL(rmw_cfg, cmd, h, offset, maskbytes, valuesbytes,
341 totalbytes, maskblob, valuesblob);
342}
343
344
345static int pmf_parser_read_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
346{
347 u8 subaddr = (u8)pmf_next32(cmd);
348 u32 bytes = pmf_next32(cmd);
349
 350 LOG_PARSE("pmf: read_i2c_sub(subaddr: %x, bytes: %u)\n",
351 subaddr, bytes);
352
353 PMF_PARSE_CALL(read_i2c_sub, cmd, h, subaddr, bytes);
354}
355
356static int pmf_parser_write_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
357{
358 u8 subaddr = (u8)pmf_next32(cmd);
359 u32 bytes = pmf_next32(cmd);
360 const void *blob = pmf_next_blob(cmd, bytes);
361
 362 LOG_PARSE("pmf: write_i2c_sub(subaddr: %x, bytes: %u) ...\n",
363 subaddr, bytes);
364 LOG_BLOB("pmf: data: \n", blob, bytes);
365
366 PMF_PARSE_CALL(write_i2c_sub, cmd, h, subaddr, bytes, blob);
367}
368
369static int pmf_parser_set_i2c_mode(struct pmf_cmd *cmd, struct pmf_handlers *h)
370{
371 u32 mode = pmf_next32(cmd);
372
373 LOG_PARSE("pmf: set_i2c_mode(mode: %d)\n", mode);
374
375 PMF_PARSE_CALL(set_i2c_mode, cmd, h, mode);
376}
377
378
379static int pmf_parser_rmw_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
380{
381 u8 subaddr = (u8)pmf_next32(cmd);
382 u32 maskbytes = pmf_next32(cmd);
383 u32 valuesbytes = pmf_next32(cmd);
384 u32 totalbytes = pmf_next32(cmd);
385 const void *maskblob = pmf_next_blob(cmd, maskbytes);
386 const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
387
 388 LOG_PARSE("pmf: rmw_i2c_sub(subaddr: %x, maskbytes: %u, valuebytes: %u"
389 ", totalbytes: %d) ...\n",
390 subaddr, maskbytes, valuesbytes, totalbytes);
391 LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
392 LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
393
394 PMF_PARSE_CALL(rmw_i2c_sub, cmd, h, subaddr, maskbytes, valuesbytes,
395 totalbytes, maskblob, valuesblob);
396}
397
398static int pmf_parser_read_reg32_msrx(struct pmf_cmd *cmd,
399 struct pmf_handlers *h)
400{
401 u32 offset = pmf_next32(cmd);
402 u32 mask = pmf_next32(cmd);
403 u32 shift = pmf_next32(cmd);
404 u32 xor = pmf_next32(cmd);
405
406 LOG_PARSE("pmf: read_reg32_msrx(offset: %x, mask: %x, shift: %x,"
 407 " xor: %x)\n", offset, mask, shift, xor);
408
409 PMF_PARSE_CALL(read_reg32_msrx, cmd, h, offset, mask, shift, xor);
410}
411
412static int pmf_parser_read_reg16_msrx(struct pmf_cmd *cmd,
413 struct pmf_handlers *h)
414{
415 u32 offset = pmf_next32(cmd);
416 u32 mask = pmf_next32(cmd);
417 u32 shift = pmf_next32(cmd);
418 u32 xor = pmf_next32(cmd);
419
420 LOG_PARSE("pmf: read_reg16_msrx(offset: %x, mask: %x, shift: %x,"
 421 " xor: %x)\n", offset, mask, shift, xor);
422
423 PMF_PARSE_CALL(read_reg16_msrx, cmd, h, offset, mask, shift, xor);
424}
425static int pmf_parser_read_reg8_msrx(struct pmf_cmd *cmd,
426 struct pmf_handlers *h)
427{
428 u32 offset = pmf_next32(cmd);
429 u32 mask = pmf_next32(cmd);
430 u32 shift = pmf_next32(cmd);
431 u32 xor = pmf_next32(cmd);
432
433 LOG_PARSE("pmf: read_reg8_msrx(offset: %x, mask: %x, shift: %x,"
 434 " xor: %x)\n", offset, mask, shift, xor);
435
436 PMF_PARSE_CALL(read_reg8_msrx, cmd, h, offset, mask, shift, xor);
437}
438
439static int pmf_parser_write_reg32_slm(struct pmf_cmd *cmd,
440 struct pmf_handlers *h)
441{
442 u32 offset = pmf_next32(cmd);
443 u32 shift = pmf_next32(cmd);
444 u32 mask = pmf_next32(cmd);
445
 446 LOG_PARSE("pmf: write_reg32_slm(offset: %x, shift: %x, mask: %x)\n",
447 offset, shift, mask);
448
449 PMF_PARSE_CALL(write_reg32_slm, cmd, h, offset, shift, mask);
450}
451
452static int pmf_parser_write_reg16_slm(struct pmf_cmd *cmd,
453 struct pmf_handlers *h)
454{
455 u32 offset = pmf_next32(cmd);
456 u32 shift = pmf_next32(cmd);
457 u32 mask = pmf_next32(cmd);
458
 459 LOG_PARSE("pmf: write_reg16_slm(offset: %x, shift: %x, mask: %x)\n",
460 offset, shift, mask);
461
462 PMF_PARSE_CALL(write_reg16_slm, cmd, h, offset, shift, mask);
463}
464
465static int pmf_parser_write_reg8_slm(struct pmf_cmd *cmd,
466 struct pmf_handlers *h)
467{
468 u32 offset = pmf_next32(cmd);
469 u32 shift = pmf_next32(cmd);
470 u32 mask = pmf_next32(cmd);
471
 472 LOG_PARSE("pmf: write_reg8_slm(offset: %x, shift: %x, mask: %x)\n",
473 offset, shift, mask);
474
475 PMF_PARSE_CALL(write_reg8_slm, cmd, h, offset, shift, mask);
476}
477
478static int pmf_parser_mask_and_compare(struct pmf_cmd *cmd,
479 struct pmf_handlers *h)
480{
481 u32 bytes = pmf_next32(cmd);
482 const void *maskblob = pmf_next_blob(cmd, bytes);
483 const void *valuesblob = pmf_next_blob(cmd, bytes);
484
 485 LOG_PARSE("pmf: mask_and_compare(length: %u) ...\n", bytes);
486 LOG_BLOB("pmf: mask data: \n", maskblob, bytes);
487 LOG_BLOB("pmf: values data: \n", valuesblob, bytes);
488
489 PMF_PARSE_CALL(mask_and_compare, cmd, h,
490 bytes, maskblob, valuesblob);
491}
492
493
494typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *cmd, struct pmf_handlers *h);
495
496static pmf_cmd_parser_t pmf_parsers[PMF_CMD_COUNT] =
497{
498 NULL,
499 pmf_parser_write_gpio,
500 pmf_parser_read_gpio,
501 pmf_parser_write_reg32,
502 pmf_parser_read_reg32,
503 pmf_parser_write_reg16,
504 pmf_parser_read_reg16,
505 pmf_parser_write_reg8,
506 pmf_parser_read_reg8,
507 pmf_parser_delay,
508 pmf_parser_wait_reg32,
509 pmf_parser_wait_reg16,
510 pmf_parser_wait_reg8,
511 pmf_parser_read_i2c,
512 pmf_parser_write_i2c,
513 pmf_parser_rmw_i2c,
514 NULL, /* Bogus command */
515 NULL, /* Shift bytes right: NYI */
516 NULL, /* Shift bytes left: NYI */
517 pmf_parser_read_cfg,
518 pmf_parser_write_cfg,
519 pmf_parser_rmw_cfg,
520 pmf_parser_read_i2c_sub,
521 pmf_parser_write_i2c_sub,
522 pmf_parser_set_i2c_mode,
523 pmf_parser_rmw_i2c_sub,
524 pmf_parser_read_reg32_msrx,
525 pmf_parser_read_reg16_msrx,
526 pmf_parser_read_reg8_msrx,
527 pmf_parser_write_reg32_slm,
528 pmf_parser_write_reg16_slm,
529 pmf_parser_write_reg8_slm,
530 pmf_parser_mask_and_compare,
531};
532
533struct pmf_device {
534 struct list_head link;
535 struct device_node *node;
536 struct pmf_handlers *handlers;
537 struct list_head functions;
538 struct kref ref;
539};
540
541static LIST_HEAD(pmf_devices);
542static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
543
544static void pmf_release_device(struct kref *kref)
545{
546 struct pmf_device *dev = container_of(kref, struct pmf_device, ref);
547 kfree(dev);
548}
549
550static inline void pmf_put_device(struct pmf_device *dev)
551{
552 kref_put(&dev->ref, pmf_release_device);
553}
554
555static inline struct pmf_device *pmf_get_device(struct pmf_device *dev)
556{
557 kref_get(&dev->ref);
558 return dev;
559}
560
561static inline struct pmf_device *pmf_find_device(struct device_node *np)
562{
563 struct pmf_device *dev;
564
565 list_for_each_entry(dev, &pmf_devices, link) {
566 if (dev->node == np)
567 return pmf_get_device(dev);
568 }
569 return NULL;
570}
571
572static int pmf_parse_one(struct pmf_function *func,
573 struct pmf_handlers *handlers,
574 void *instdata, struct pmf_args *args)
575{
576 struct pmf_cmd cmd;
577 u32 ccode;
578 int count, rc;
579
580 cmd.cmdptr = func->data;
581 cmd.cmdend = func->data + func->length;
582 cmd.func = func;
583 cmd.instdata = instdata;
584 cmd.args = args;
585 cmd.error = 0;
586
587 LOG_PARSE("pmf: func %s, %d bytes, %s...\n",
588 func->name, func->length,
589 handlers ? "executing" : "parsing");
590
591 /* One subcommand to parse for now */
592 count = 1;
593
594 while(count-- && cmd.cmdptr < cmd.cmdend) {
595 /* Get opcode */
596 ccode = pmf_next32(&cmd);
597 /* Check if we are hitting a command list, fetch new count */
598 if (ccode == 0) {
599 count = pmf_next32(&cmd) - 1;
600 ccode = pmf_next32(&cmd);
601 }
602 if (cmd.error) {
603 LOG_ERROR("pmf: parse error, not enough data\n");
604 return -ENXIO;
605 }
606 if (ccode >= PMF_CMD_COUNT) {
607 LOG_ERROR("pmf: command code %d unknown !\n", ccode);
608 return -ENXIO;
609 }
610 if (pmf_parsers[ccode] == NULL) {
611 LOG_ERROR("pmf: no parser for command %d !\n", ccode);
612 return -ENXIO;
613 }
614 rc = pmf_parsers[ccode](&cmd, handlers);
615 if (rc != 0) {
616 LOG_ERROR("pmf: parser for command %d returned"
617 " error %d\n", ccode, rc);
618 return rc;
619 }
620 }
621
622 /* We are doing an initial parse pass, we need to adjust the size */
623 if (handlers == NULL)
624 func->length = cmd.cmdptr - func->data;
625
626 return 0;
627}
628
629static int pmf_add_function_prop(struct pmf_device *dev, void *driverdata,
630 const char *name, u32 *data,
631 unsigned int length)
632{
633 int count = 0;
634 struct pmf_function *func = NULL;
635
636 DBG("pmf: Adding functions for platform-do-%s\n", name);
637
638 while (length >= 12) {
639 /* Allocate a structure */
640 func = kzalloc(sizeof(struct pmf_function), GFP_KERNEL);
641 if (func == NULL)
642 goto bail;
643 kref_init(&func->ref);
644 INIT_LIST_HEAD(&func->irq_clients);
645 func->node = dev->node;
646 func->driver_data = driverdata;
647 func->name = name;
648 func->phandle = data[0];
649 func->flags = data[1];
650 data += 2;
651 length -= 8;
652 func->data = data;
653 func->length = length;
654 func->dev = dev;
655 DBG("pmf: idx %d: flags=%08x, phandle=%08x "
656 " %d bytes remaining, parsing...\n",
657 count+1, func->flags, func->phandle, length);
658 if (pmf_parse_one(func, NULL, NULL, NULL)) {
659 kfree(func);
660 goto bail;
661 }
662 length -= func->length;
663 data = (u32 *)(((u8 *)data) + func->length);
664 list_add(&func->link, &dev->functions);
665 pmf_get_device(dev);
666 count++;
667 }
668 bail:
669 DBG("pmf: Added %d functions\n", count);
670
671 return count;
672}
673
674static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
675{
676 struct property *pp;
677#define PP_PREFIX "platform-do-"
678 const int plen = strlen(PP_PREFIX);
679 int count = 0;
680
681 for (pp = dev->node->properties; pp != 0; pp = pp->next) {
682 char *name;
683 if (strncmp(pp->name, PP_PREFIX, plen) != 0)
684 continue;
685 name = pp->name + plen;
686 if (strlen(name) && pp->length >= 12)
687 count += pmf_add_function_prop(dev, driverdata, name,
688 (u32 *)pp->value,
689 pp->length);
690 }
691 return count;
692}
693
694
695int pmf_register_driver(struct device_node *np,
696 struct pmf_handlers *handlers,
697 void *driverdata)
698{
699 struct pmf_device *dev;
700 unsigned long flags;
701 int rc = 0;
702
703 if (handlers == NULL)
704 return -EINVAL;
705
706 DBG("pmf: registering driver for node %s\n", np->full_name);
707
708 spin_lock_irqsave(&pmf_lock, flags);
709 dev = pmf_find_device(np);
710 spin_unlock_irqrestore(&pmf_lock, flags);
711 if (dev != NULL) {
712 DBG("pmf: already there !\n");
713 pmf_put_device(dev);
714 return -EBUSY;
715 }
716
717 dev = kzalloc(sizeof(struct pmf_device), GFP_KERNEL);
718 if (dev == NULL) {
719 DBG("pmf: no memory !\n");
720 return -ENOMEM;
721 }
722 kref_init(&dev->ref);
723 dev->node = of_node_get(np);
724 dev->handlers = handlers;
725 INIT_LIST_HEAD(&dev->functions);
726
727 rc = pmf_add_functions(dev, driverdata);
728 if (rc == 0) {
 729 DBG("pmf: no functions, disposing...\n");
730 of_node_put(np);
731 kfree(dev);
732 return -ENODEV;
733 }
734
735 spin_lock_irqsave(&pmf_lock, flags);
736 list_add(&dev->link, &pmf_devices);
737 spin_unlock_irqrestore(&pmf_lock, flags);
738
739 return 0;
740}
741EXPORT_SYMBOL_GPL(pmf_register_driver);
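A minimal usage sketch for the provider side (hypothetical driver: np, my_data and the handler body are assumed; compare macio_mmio_init_one() in pfunc_base.c for a real caller):

	static int example_write_reg32(PMF_STD_ARGS, u32 offset, u32 value,
				       u32 mask)
	{
		/* touch the hardware here */
		return 0;
	}

	static struct pmf_handlers example_handlers = {
		.write_reg32	= example_write_reg32,
		.owner		= THIS_MODULE,
	};

	/* in the driver's probe/init path: */
	rc = pmf_register_driver(np, &example_handlers, my_data);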
742
743struct pmf_function *pmf_get_function(struct pmf_function *func)
744{
745 if (!try_module_get(func->dev->handlers->owner))
746 return NULL;
747 kref_get(&func->ref);
748 return func;
749}
750EXPORT_SYMBOL_GPL(pmf_get_function);
751
752static void pmf_release_function(struct kref *kref)
753{
754 struct pmf_function *func =
755 container_of(kref, struct pmf_function, ref);
756 pmf_put_device(func->dev);
757 kfree(func);
758}
759
760static inline void __pmf_put_function(struct pmf_function *func)
761{
762 kref_put(&func->ref, pmf_release_function);
763}
764
765void pmf_put_function(struct pmf_function *func)
766{
767 if (func == NULL)
768 return;
769 module_put(func->dev->handlers->owner);
770 __pmf_put_function(func);
771}
772EXPORT_SYMBOL_GPL(pmf_put_function);
773
774void pmf_unregister_driver(struct device_node *np)
775{
776 struct pmf_device *dev;
777 unsigned long flags;
778
779 DBG("pmf: unregistering driver for node %s\n", np->full_name);
780
781 spin_lock_irqsave(&pmf_lock, flags);
782 dev = pmf_find_device(np);
783 if (dev == NULL) {
 784 DBG("pmf: no such driver !\n");
785 spin_unlock_irqrestore(&pmf_lock, flags);
786 return;
787 }
788 list_del(&dev->link);
789
790 while(!list_empty(&dev->functions)) {
791 struct pmf_function *func =
792 list_entry(dev->functions.next, typeof(*func), link);
793 list_del(&func->link);
794 __pmf_put_function(func);
795 }
796
797 pmf_put_device(dev);
798 spin_unlock_irqrestore(&pmf_lock, flags);
799}
800EXPORT_SYMBOL_GPL(pmf_unregister_driver);
801
802struct pmf_function *__pmf_find_function(struct device_node *target,
803 const char *name, u32 flags)
804{
805 struct device_node *actor = of_node_get(target);
806 struct pmf_device *dev;
807 struct pmf_function *func, *result = NULL;
808 char fname[64];
809 u32 *prop, ph;
810
811 /*
812 * Look for a "platform-*" function reference. If we can't find
 813 * one, then we fall back to a direct call attempt
814 */
815 snprintf(fname, 63, "platform-%s", name);
816 prop = (u32 *)get_property(target, fname, NULL);
817 if (prop == NULL)
818 goto find_it;
819 ph = *prop;
820 if (ph == 0)
821 goto find_it;
822
823 /*
 824 * Ok, now try to find the actor. If we can't find it, we fail;
 825 * there is no point in falling back in that case
826 */
827 of_node_put(actor);
828 actor = of_find_node_by_phandle(ph);
829 if (actor == NULL)
830 return NULL;
831 find_it:
832 dev = pmf_find_device(actor);
833 if (dev == NULL)
834 return NULL;
835
836 list_for_each_entry(func, &dev->functions, link) {
837 if (name && strcmp(name, func->name))
838 continue;
839 if (func->phandle && target->node != func->phandle)
840 continue;
841 if ((func->flags & flags) == 0)
842 continue;
843 result = func;
844 break;
845 }
846 of_node_put(actor);
847 pmf_put_device(dev);
848 return result;
849}
850
851
852int pmf_register_irq_client(struct device_node *target,
853 const char *name,
854 struct pmf_irq_client *client)
855{
856 struct pmf_function *func;
857 unsigned long flags;
858
859 spin_lock_irqsave(&pmf_lock, flags);
860 func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
861 if (func == NULL) {
862 spin_unlock_irqrestore(&pmf_lock, flags);
863 return -ENODEV;
864 }
865 list_add(&client->link, &func->irq_clients);
866 spin_unlock_irqrestore(&pmf_lock, flags);
867
868 return 0;
869}
870EXPORT_SYMBOL_GPL(pmf_register_irq_client);
871
872void pmf_unregister_irq_client(struct device_node *np,
873 const char *name,
874 struct pmf_irq_client *client)
875{
876 unsigned long flags;
877
878 spin_lock_irqsave(&pmf_lock, flags);
879 list_del(&client->link);
880 spin_unlock_irqrestore(&pmf_lock, flags);
881}
882EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
883
884
885void pmf_do_irq(struct pmf_function *func)
886{
887 unsigned long flags;
888 struct pmf_irq_client *client;
889
890 /* For now, using a spinlock over the whole function. Can be made
891 * to drop the lock using 2 lists if necessary
892 */
893 spin_lock_irqsave(&pmf_lock, flags);
894 list_for_each_entry(client, &func->irq_clients, link) {
895 if (!try_module_get(client->owner))
896 continue;
897 client->handler(client->data);
898 module_put(client->owner);
899 }
900 spin_unlock_irqrestore(&pmf_lock, flags);
901}
902EXPORT_SYMBOL_GPL(pmf_do_irq);
903
904
905int pmf_call_one(struct pmf_function *func, struct pmf_args *args)
906{
907 struct pmf_device *dev = func->dev;
908 void *instdata = NULL;
909 int rc = 0;
910
911 DBG(" ** pmf_call_one(%s/%s) **\n", dev->node->full_name, func->name);
912
913 if (dev->handlers->begin)
914 instdata = dev->handlers->begin(func, args);
915 rc = pmf_parse_one(func, dev->handlers, instdata, args);
916 if (dev->handlers->end)
917 dev->handlers->end(func, instdata);
918
919 return rc;
920}
921EXPORT_SYMBOL_GPL(pmf_call_one);
922
923int pmf_do_functions(struct device_node *np, const char *name,
924 u32 phandle, u32 fflags, struct pmf_args *args)
925{
926 struct pmf_device *dev;
927 struct pmf_function *func, *tmp;
928 unsigned long flags;
929 int rc = -ENODEV;
930
931 spin_lock_irqsave(&pmf_lock, flags);
932
933 dev = pmf_find_device(np);
934 if (dev == NULL) {
935 spin_unlock_irqrestore(&pmf_lock, flags);
936 return -ENODEV;
937 }
938 list_for_each_entry_safe(func, tmp, &dev->functions, link) {
939 if (name && strcmp(name, func->name))
940 continue;
941 if (phandle && func->phandle && phandle != func->phandle)
942 continue;
943 if ((func->flags & fflags) == 0)
944 continue;
945 if (pmf_get_function(func) == NULL)
946 continue;
947 spin_unlock_irqrestore(&pmf_lock, flags);
948 rc = pmf_call_one(func, args);
949 pmf_put_function(func);
950 spin_lock_irqsave(&pmf_lock, flags);
951 }
952 pmf_put_device(dev);
953 spin_unlock_irqrestore(&pmf_lock, flags);
954
955 return rc;
956}
957EXPORT_SYMBOL_GPL(pmf_do_functions);
958
959
960struct pmf_function *pmf_find_function(struct device_node *target,
961 const char *name)
962{
963 struct pmf_function *func;
964 unsigned long flags;
965
966 spin_lock_irqsave(&pmf_lock, flags);
967 func = __pmf_find_function(target, name, PMF_FLAGS_ON_DEMAND);
968 if (func)
969 func = pmf_get_function(func);
970 spin_unlock_irqrestore(&pmf_lock, flags);
971 return func;
972}
973EXPORT_SYMBOL_GPL(pmf_find_function);
974
975int pmf_call_function(struct device_node *target, const char *name,
976 struct pmf_args *args)
977{
978 struct pmf_function *func = pmf_find_function(target, name);
979 int rc;
980
981 if (func == NULL)
982 return -ENODEV;
983
984 rc = pmf_call_one(func, args);
985 pmf_put_function(func);
986 return rc;
987}
988EXPORT_SYMBOL_GPL(pmf_call_function);
989
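That closes out the consumer half of the platform-functions API: providers register a handler table with pmf_register_driver(), and clients invoke functions by name via pmf_call_function() or the find/call_one pair. As a minimal sketch of the calling side (the node and function names here are hypothetical, but the pmf_args layout matches the smp.c hunk later in this patch, which calls "cpu-timebase" on /cpus):

/* Sketch only: exercises pmf_call_function() as exported above.
 * "example-node" and "example-gate" are made-up names; real ones come
 * from platform-* properties in the device tree.
 */
#include <linux/errno.h>
#include <asm/prom.h>
#include <asm/pmac_pfunc.h>

static int example_set_gate(int on)
{
	struct device_node *np;
	struct pmf_args args;
	int rc;

	np = of_find_node_by_name(NULL, "example-node");
	if (np == NULL)
		return -ENODEV;

	args.count = 1;		/* one argument, as in the smp.c hunk */
	args.u[0].v = on;	/* plain value argument */
	rc = pmf_call_function(np, "example-gate", &args);
	of_node_put(np);	/* balance of_find_node_by_name() */
	return rc;
}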
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 90040c49494..18bf3011d1e 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -5,8 +5,8 @@
5 * in a separate file 5 * in a separate file
6 * 6 *
7 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) 7 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
8 * 8 * Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
9 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org) 9 * IBM, Corp.
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 12 * modify it under the terms of the GNU General Public License
@@ -54,12 +54,7 @@ struct pmac_irq_hw {
54}; 54};
55 55
56/* Default addresses */ 56/* Default addresses */
57static volatile struct pmac_irq_hw *pmac_irq_hw[4] = { 57static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];
58 (struct pmac_irq_hw *) 0xf3000020,
59 (struct pmac_irq_hw *) 0xf3000010,
60 (struct pmac_irq_hw *) 0xf4000020,
61 (struct pmac_irq_hw *) 0xf4000010,
62};
63 58
64#define GC_LEVEL_MASK 0x3ff00000 59#define GC_LEVEL_MASK 0x3ff00000
65#define OHARE_LEVEL_MASK 0x1ff00000 60#define OHARE_LEVEL_MASK 0x1ff00000
@@ -82,8 +77,7 @@ static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
82 * since it can lose interrupts (see pmac_set_irq_mask). 77 * since it can lose interrupts (see pmac_set_irq_mask).
83 * -- Cort 78 * -- Cort
84 */ 79 */
85void 80void __set_lost(unsigned long irq_nr, int nokick)
86__set_lost(unsigned long irq_nr, int nokick)
87{ 81{
88 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) { 82 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
89 atomic_inc(&ppc_n_lost_interrupts); 83 atomic_inc(&ppc_n_lost_interrupts);
@@ -92,8 +86,7 @@ __set_lost(unsigned long irq_nr, int nokick)
92 } 86 }
93} 87}
94 88
95static void 89static void pmac_mask_and_ack_irq(unsigned int irq_nr)
96pmac_mask_and_ack_irq(unsigned int irq_nr)
97{ 90{
98 unsigned long bit = 1UL << (irq_nr & 0x1f); 91 unsigned long bit = 1UL << (irq_nr & 0x1f);
99 int i = irq_nr >> 5; 92 int i = irq_nr >> 5;
@@ -224,8 +217,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
224 return IRQ_NONE; 217 return IRQ_NONE;
225} 218}
226 219
227int 220static int pmac_get_irq(struct pt_regs *regs)
228pmac_get_irq(struct pt_regs *regs)
229{ 221{
230 int irq; 222 int irq;
231 unsigned long bits = 0; 223 unsigned long bits = 0;
@@ -256,34 +248,40 @@ pmac_get_irq(struct pt_regs *regs)
256 248
257/* This routine will fix some missing interrupt values in the device tree 249/* This routine will fix some missing interrupt values in the device tree
258 * on the gatwick mac-io controller used by some PowerBooks 250 * on the gatwick mac-io controller used by some PowerBooks
251 *
252 * Walking of OF nodes could use a bit more fixing up here, but it's not
253 * very important as this is all boot time code on static portions of the
254 * device-tree.
255 *
256 * However, the modifications done to "intrs" will have to be removed and
257 * replaced with proper updates of the "interrupts" properties or
258 * AAPL,interrupts, yet to be decided, once the dynamic parsing is there.
259 */ 259 */
260static void __init 260static void __init pmac_fix_gatwick_interrupts(struct device_node *gw,
261pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base) 261 int irq_base)
262{ 262{
263 struct device_node *node; 263 struct device_node *node;
264 int count; 264 int count;
265 265
266 memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool)); 266 memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
267 node = gw->child;
268 count = 0; 267 count = 0;
269 while(node) 268 for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) {
270 {
271 /* Fix SCC */ 269 /* Fix SCC */
272 if (strcasecmp(node->name, "escc") == 0) 270 if ((strcasecmp(node->name, "escc") == 0) && node->child) {
273 if (node->child) { 271 if (node->child->n_intrs < 3) {
274 if (node->child->n_intrs < 3) { 272 node->child->intrs = &gatwick_int_pool[count];
275 node->child->intrs = &gatwick_int_pool[count]; 273 count += 3;
276 count += 3;
277 }
278 node->child->n_intrs = 3;
279 node->child->intrs[0].line = 15+irq_base;
280 node->child->intrs[1].line = 4+irq_base;
281 node->child->intrs[2].line = 5+irq_base;
282 printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
283 node->child->intrs[0].line,
284 node->child->intrs[1].line,
285 node->child->intrs[2].line);
286 } 274 }
275 node->child->n_intrs = 3;
276 node->child->intrs[0].line = 15+irq_base;
277 node->child->intrs[1].line = 4+irq_base;
278 node->child->intrs[2].line = 5+irq_base;
279 printk(KERN_INFO "irq: fixed SCC on gatwick"
280 " (%d,%d,%d)\n",
281 node->child->intrs[0].line,
282 node->child->intrs[1].line,
283 node->child->intrs[2].line);
284 }
287 /* Fix media-bay & left SWIM */ 285 /* Fix media-bay & left SWIM */
288 if (strcasecmp(node->name, "media-bay") == 0) { 286 if (strcasecmp(node->name, "media-bay") == 0) {
289 struct device_node* ya_node; 287 struct device_node* ya_node;
@@ -292,12 +290,11 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
292 node->intrs = &gatwick_int_pool[count++]; 290 node->intrs = &gatwick_int_pool[count++];
293 node->n_intrs = 1; 291 node->n_intrs = 1;
294 node->intrs[0].line = 29+irq_base; 292 node->intrs[0].line = 29+irq_base;
295 printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n", 293 printk(KERN_INFO "irq: fixed media-bay on gatwick"
296 node->intrs[0].line); 294 " (%d)\n", node->intrs[0].line);
297 295
298 ya_node = node->child; 296 ya_node = node->child;
299 while(ya_node) 297 while(ya_node) {
300 {
301 if (strcasecmp(ya_node->name, "floppy") == 0) { 298 if (strcasecmp(ya_node->name, "floppy") == 0) {
302 if (ya_node->n_intrs < 2) { 299 if (ya_node->n_intrs < 2) {
303 ya_node->intrs = &gatwick_int_pool[count]; 300 ya_node->intrs = &gatwick_int_pool[count];
@@ -323,7 +320,6 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
323 ya_node = ya_node->sibling; 320 ya_node = ya_node->sibling;
324 } 321 }
325 } 322 }
326 node = node->sibling;
327 } 323 }
328 if (count > 10) { 324 if (count > 10) {
329 printk("WARNING !! Gatwick interrupt pool overflow\n"); 325 printk("WARNING !! Gatwick interrupt pool overflow\n");
@@ -338,45 +334,41 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
338 * controller. If we find this second ohare, set it up and fix the 334 * controller. If we find this second ohare, set it up and fix the
339 * interrupt value in the device tree for the ethernet chip. 335 * interrupt value in the device tree for the ethernet chip.
340 */ 336 */
341static int __init enable_second_ohare(void) 337static void __init enable_second_ohare(struct device_node *np)
342{ 338{
343 unsigned char bus, devfn; 339 unsigned char bus, devfn;
344 unsigned short cmd; 340 unsigned short cmd;
345 unsigned long addr;
346 struct device_node *irqctrler = find_devices("pci106b,7");
347 struct device_node *ether; 341 struct device_node *ether;
348 342
349 if (irqctrler == NULL || irqctrler->n_addrs <= 0) 343 /* This code doesn't strictly belong here, it could be part of
350 return -1; 344 * either the PCI initialisation or the feature code. It's kept
351 addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40); 345 * here for historical reasons.
352 pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20); 346 */
353 max_irqs = 64; 347 if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
354 if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) { 348 struct pci_controller* hose =
355 struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler); 349 pci_find_hose_for_OF_device(np);
356 if (!hose) 350 if (!hose) {
357 printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); 351 printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
358 else { 352 return;
359 early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
360 cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
361 cmd &= ~PCI_COMMAND_IO;
362 early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
363 } 353 }
354 early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
355 cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
356 cmd &= ~PCI_COMMAND_IO;
357 early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
364 } 358 }
365 359
366 /* Fix interrupt for the modem/ethernet combo controller. The number 360 /* Fix interrupt for the modem/ethernet combo controller. The number
367 in the device tree (27) is bogus (correct for the ethernet-only 361 * in the device tree (27) is bogus (correct for the ethernet-only
368 board but not the combo ethernet/modem board). 362 * board but not the combo ethernet/modem board).
369 The real interrupt is 28 on the second controller -> 28+32 = 60. 363 * The real interrupt is 28 on the second controller -> 28+32 = 60.
370 */ 364 */
371 ether = find_devices("pci1011,14"); 365 ether = of_find_node_by_name(NULL, "pci1011,14");
372 if (ether && ether->n_intrs > 0) { 366 if (ether && ether->n_intrs > 0) {
373 ether->intrs[0].line = 60; 367 ether->intrs[0].line = 60;
374 printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", 368 printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
375 ether->intrs[0].line); 369 ether->intrs[0].line);
376 } 370 }
377 371 of_node_put(ether);
378 /* Return the interrupt number of the cascade */
379 return irqctrler->intrs[0].line;
380} 372}
381 373
382#ifdef CONFIG_XMON 374#ifdef CONFIG_XMON
@@ -394,189 +386,251 @@ static struct irqaction gatwick_cascade_action = {
394 .mask = CPU_MASK_NONE, 386 .mask = CPU_MASK_NONE,
395 .name = "cascade", 387 .name = "cascade",
396}; 388};
397#endif /* CONFIG_PPC32 */
398 389
399static int pmac_u3_cascade(struct pt_regs *regs, void *data) 390static void __init pmac_pic_probe_oldstyle(void)
400{ 391{
401 return mpic_get_one_irq((struct mpic *)data, regs);
402}
403
404void __init pmac_pic_init(void)
405{
406 struct device_node *irqctrler = NULL;
407 struct device_node *irqctrler2 = NULL;
408 struct device_node *np;
409#ifdef CONFIG_PPC32
410 int i; 392 int i;
411 unsigned long addr;
412 int irq_cascade = -1; 393 int irq_cascade = -1;
413#endif 394 struct device_node *master = NULL;
414 struct mpic *mpic1, *mpic2; 395 struct device_node *slave = NULL;
396 u8 __iomem *addr;
397 struct resource r;
415 398
416 /* We first try to detect Apple's new Core99 chipset, since mac-io 399 /* Set our get_irq function */
417 * is quite different on those machines and contains an IBM MPIC2. 400 ppc_md.get_irq = pmac_get_irq;
418 */
419 np = find_type_devices("open-pic");
420 while (np) {
421 if (np->parent && !strcmp(np->parent->name, "u3"))
422 irqctrler2 = np;
423 else
424 irqctrler = np;
425 np = np->next;
426 }
427 if (irqctrler != NULL && irqctrler->n_addrs > 0) {
428 unsigned char senses[128];
429
430 printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
431 (unsigned int)irqctrler->addrs[0].address);
432 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0);
433
434 prom_get_irq_senses(senses, 0, 128);
435 mpic1 = mpic_alloc(irqctrler->addrs[0].address,
436 MPIC_PRIMARY | MPIC_WANTS_RESET,
437 0, 0, 128, 252, senses, 128, " OpenPIC ");
438 BUG_ON(mpic1 == NULL);
439 mpic_init(mpic1);
440
441 if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
442 irqctrler2->n_addrs > 0) {
443 printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
444 (u32)irqctrler2->addrs[0].address,
445 irqctrler2->intrs[0].line);
446
447 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
448 prom_get_irq_senses(senses, 128, 128 + 124);
449
450 /* We don't need to set MPIC_BROKEN_U3 here since we don't have
451 * hypertransport interrupts routed to it
452 */
453 mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
454 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
455 0, 128, 124, 0, senses, 124,
456 " U3-MPIC ");
457 BUG_ON(mpic2 == NULL);
458 mpic_init(mpic2);
459 mpic_setup_cascade(irqctrler2->intrs[0].line,
460 pmac_u3_cascade, mpic2);
461 }
462#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
463 {
464 struct device_node* pswitch;
465 int nmi_irq;
466
467 pswitch = find_devices("programmer-switch");
468 if (pswitch && pswitch->n_intrs) {
469 nmi_irq = pswitch->intrs[0].line;
470 mpic_irq_set_priority(nmi_irq, 9);
471 setup_irq(nmi_irq, &xmon_action);
472 }
473 }
474#endif /* CONFIG_XMON */
475 return;
476 }
477 irqctrler = NULL;
478 401
479#ifdef CONFIG_PPC32 402 /*
480 /* Get the level/edge settings, assume if it's not 403 * Find the interrupt controller type & node
481 * a Grand Central nor an OHare, then it's an Heathrow
482 * (or Paddington).
483 */ 404 */
484 ppc_md.get_irq = pmac_get_irq; 405
485 if (find_devices("gc")) 406 if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
407 max_irqs = max_real_irqs = 32;
486 level_mask[0] = GC_LEVEL_MASK; 408 level_mask[0] = GC_LEVEL_MASK;
487 else if (find_devices("ohare")) { 409 } else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
410 max_irqs = max_real_irqs = 32;
488 level_mask[0] = OHARE_LEVEL_MASK; 411 level_mask[0] = OHARE_LEVEL_MASK;
412
489 /* We might have a second cascaded ohare */ 413 /* We might have a second cascaded ohare */
490 level_mask[1] = OHARE_LEVEL_MASK; 414 slave = of_find_node_by_name(NULL, "pci106b,7");
491 } else { 415 if (slave) {
416 max_irqs = 64;
417 level_mask[1] = OHARE_LEVEL_MASK;
418 enable_second_ohare(slave);
419 }
420 } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
421 max_irqs = max_real_irqs = 64;
492 level_mask[0] = HEATHROW_LEVEL_MASK; 422 level_mask[0] = HEATHROW_LEVEL_MASK;
493 level_mask[1] = 0; 423 level_mask[1] = 0;
424
494 /* We might have a second cascaded heathrow */ 425 /* We might have a second cascaded heathrow */
495 level_mask[2] = HEATHROW_LEVEL_MASK; 426 slave = of_find_node_by_name(master, "mac-io");
496 level_mask[3] = 0; 427
497 } 428 /* Check ordering of master & slave */
429 if (device_is_compatible(master, "gatwick")) {
430 struct device_node *tmp;
431 BUG_ON(slave == NULL);
432 tmp = master;
433 master = slave;
434 slave = tmp;
435 }
498 436
499 /* 437 /* We found a slave */
500 * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts, 438 if (slave) {
501 * 1998 G3 Series PowerBooks have 128,
502 * other powermacs have 32.
503 * The combo ethernet/modem card for the Powerstar powerbooks
504 * (2400/3400/3500, ohare based) has a second ohare chip
505 * effectively making a total of 64.
506 */
507 max_irqs = max_real_irqs = 32;
508 irqctrler = find_devices("mac-io");
509 if (irqctrler)
510 {
511 max_real_irqs = 64;
512 if (irqctrler->next)
513 max_irqs = 128; 439 max_irqs = 128;
514 else 440 level_mask[2] = HEATHROW_LEVEL_MASK;
515 max_irqs = 64; 441 level_mask[3] = 0;
442 pmac_fix_gatwick_interrupts(slave, max_real_irqs);
443 }
516 } 444 }
445 BUG_ON(master == NULL);
446
447 /* Set the handler for the main PIC */
517 for ( i = 0; i < max_real_irqs ; i++ ) 448 for ( i = 0; i < max_real_irqs ; i++ )
518 irq_desc[i].handler = &pmac_pic; 449 irq_desc[i].handler = &pmac_pic;
519 450
520 /* get addresses of first controller */ 451 /* Get addresses of first controller if we have a node for it */
521 if (irqctrler) { 452 BUG_ON(of_address_to_resource(master, 0, &r));
522 if (irqctrler->n_addrs > 0) {
523 addr = (unsigned long)
524 ioremap(irqctrler->addrs[0].address, 0x40);
525 for (i = 0; i < 2; ++i)
526 pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
527 (addr + (2 - i) * 0x10);
528 }
529 453
530 /* get addresses of second controller */ 454 /* Map interrupts of primary controller */
531 irqctrler = irqctrler->next; 455 addr = (u8 __iomem *) ioremap(r.start, 0x40);
532 if (irqctrler && irqctrler->n_addrs > 0) { 456 i = 0;
533 addr = (unsigned long) 457 pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
534 ioremap(irqctrler->addrs[0].address, 0x40); 458 (addr + 0x20);
535 for (i = 2; i < 4; ++i) 459 if (max_real_irqs > 32)
536 pmac_irq_hw[i] = (volatile struct pmac_irq_hw*) 460 pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
537 (addr + (4 - i) * 0x10); 461 (addr + 0x10);
538 irq_cascade = irqctrler->intrs[0].line; 462 of_node_put(master);
539 if (device_is_compatible(irqctrler, "gatwick")) 463
540 pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs); 464 printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
541 } 465 master->full_name, max_real_irqs);
542 } else { 466
543 /* older powermacs have a GC (grand central) or ohare at 467 /* Map interrupts of cascaded controller */
544 f3000000, with interrupt control registers at f3000020. */ 468 if (slave && !of_address_to_resource(slave, 0, &r)) {
545 addr = (unsigned long) ioremap(0xf3000000, 0x40); 469 addr = (u8 __iomem *)ioremap(r.start, 0x40);
546 pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20); 470 pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
471 (addr + 0x20);
472 if (max_irqs > 64)
473 pmac_irq_hw[i++] =
474 (volatile struct pmac_irq_hw __iomem *)
475 (addr + 0x10);
476 irq_cascade = slave->intrs[0].line;
477
478 printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
479 " cascade: %d\n", slave->full_name,
480 max_irqs - max_real_irqs, irq_cascade);
547 } 481 }
548 482 of_node_put(slave);
549 /* PowerBooks 3400 and 3500 can have a second controller in a second
550 ohare chip, on the combo ethernet/modem card */
551 if (machine_is_compatible("AAPL,3400/2400")
552 || machine_is_compatible("AAPL,3500"))
553 irq_cascade = enable_second_ohare();
554 483
555 /* disable all interrupts in all controllers */ 484 /* disable all interrupts in all controllers */
556 for (i = 0; i * 32 < max_irqs; ++i) 485 for (i = 0; i * 32 < max_irqs; ++i)
557 out_le32(&pmac_irq_hw[i]->enable, 0); 486 out_le32(&pmac_irq_hw[i]->enable, 0);
487
558 /* mark level interrupts */ 488 /* mark level interrupts */
559 for (i = 0; i < max_irqs; i++) 489 for (i = 0; i < max_irqs; i++)
560 if (level_mask[i >> 5] & (1UL << (i & 0x1f))) 490 if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
561 irq_desc[i].status = IRQ_LEVEL; 491 irq_desc[i].status = IRQ_LEVEL;
562 492
563 	/* get interrupt line of secondary interrupt controller */ 493 	/* Setup handlers for secondary controller and hook cascade irq */
564 if (irq_cascade >= 0) { 494 if (slave) {
565 printk(KERN_INFO "irq: secondary controller on irq %d\n",
566 (int)irq_cascade);
567 for ( i = max_real_irqs ; i < max_irqs ; i++ ) 495 for ( i = max_real_irqs ; i < max_irqs ; i++ )
568 irq_desc[i].handler = &gatwick_pic; 496 irq_desc[i].handler = &gatwick_pic;
569 setup_irq(irq_cascade, &gatwick_cascade_action); 497 setup_irq(irq_cascade, &gatwick_cascade_action);
570 } 498 }
571 printk("System has %d possible interrupts\n", max_irqs); 499 printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
572 if (max_irqs != max_real_irqs)
573 printk(KERN_DEBUG "%d interrupts on main controller\n",
574 max_real_irqs);
575
576#ifdef CONFIG_XMON 500#ifdef CONFIG_XMON
577 setup_irq(20, &xmon_action); 501 setup_irq(20, &xmon_action);
578#endif /* CONFIG_XMON */ 502#endif
579#endif /* CONFIG_PPC32 */ 503}
504#endif /* CONFIG_PPC32 */
505
506static int pmac_u3_cascade(struct pt_regs *regs, void *data)
507{
508 return mpic_get_one_irq((struct mpic *)data, regs);
509}
510
511static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
512{
513#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
514 struct device_node* pswitch;
515 int nmi_irq;
516
517 pswitch = of_find_node_by_name(NULL, "programmer-switch");
518 if (pswitch && pswitch->n_intrs) {
519 nmi_irq = pswitch->intrs[0].line;
520 mpic_irq_set_priority(nmi_irq, 9);
521 setup_irq(nmi_irq, &xmon_action);
522 }
523 of_node_put(pswitch);
524#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
525}
526
527static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
528 int master)
529{
530 unsigned char senses[128];
531 int offset = master ? 0 : 128;
532 int count = master ? 128 : 124;
533 const char *name = master ? " MPIC 1 " : " MPIC 2 ";
534 struct resource r;
535 struct mpic *mpic;
536 unsigned int flags = master ? MPIC_PRIMARY : 0;
537 int rc;
538
539 rc = of_address_to_resource(np, 0, &r);
540 if (rc)
541 return NULL;
542
543 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
544
545 prom_get_irq_senses(senses, offset, offset + count);
546
547 flags |= MPIC_WANTS_RESET;
548 if (get_property(np, "big-endian", NULL))
549 flags |= MPIC_BIG_ENDIAN;
550
551 /* Primary Big Endian means HT interrupts. This is quite dodgy
552 * but works until I find a better way
553 */
554 if (master && (flags & MPIC_BIG_ENDIAN))
555 flags |= MPIC_BROKEN_U3;
556
557 mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 252 : 0,
558 senses, count, name);
559 if (mpic == NULL)
560 return NULL;
561
562 mpic_init(mpic);
563
564 return mpic;
565 }
566
567static int __init pmac_pic_probe_mpic(void)
568{
569 struct mpic *mpic1, *mpic2;
570 struct device_node *np, *master = NULL, *slave = NULL;
571
572 /* We can have up to 2 MPICs cascaded */
573 for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
574 != NULL;) {
575 if (master == NULL &&
576 get_property(np, "interrupts", NULL) == NULL)
577 master = of_node_get(np);
578 else if (slave == NULL)
579 slave = of_node_get(np);
580 if (master && slave)
581 break;
582 }
583
584 /* Check for bogus setups */
585 if (master == NULL && slave != NULL) {
586 master = slave;
587 slave = NULL;
588 }
589
590 /* Not found, default to good old pmac pic */
591 if (master == NULL)
592 return -ENODEV;
593
594 /* Set master handler */
595 ppc_md.get_irq = mpic_get_irq;
596
597 /* Setup master */
598 mpic1 = pmac_setup_one_mpic(master, 1);
599 BUG_ON(mpic1 == NULL);
600
601 /* Install NMI if any */
602 pmac_pic_setup_mpic_nmi(mpic1);
603
604 of_node_put(master);
605
606 /* No slave, let's go out */
607 if (slave == NULL || slave->n_intrs < 1)
608 return 0;
609
610 mpic2 = pmac_setup_one_mpic(slave, 0);
611 if (mpic2 == NULL) {
612 printk(KERN_ERR "Failed to setup slave MPIC\n");
613 of_node_put(slave);
614 return 0;
615 }
616 mpic_setup_cascade(slave->intrs[0].line, pmac_u3_cascade, mpic2);
617
618 of_node_put(slave);
619 return 0;
620}
621
622
623void __init pmac_pic_init(void)
624{
625 /* We first try to detect Apple's new Core99 chipset, since mac-io
626 * is quite different on those machines and contains an IBM MPIC2.
627 */
628 if (pmac_pic_probe_mpic() == 0)
629 return;
630
631#ifdef CONFIG_PPC32
632 pmac_pic_probe_oldstyle();
633#endif
580} 634}
581 635
582#if defined(CONFIG_PM) && defined(CONFIG_PPC32) 636#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 2ad25e13423..21c7b0f8f32 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -42,10 +42,6 @@ extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
42 unsigned long data_port, unsigned long ctrl_port, int *irq); 42 unsigned long data_port, unsigned long ctrl_port, int *irq);
43 43
44extern int pmac_nvram_init(void); 44extern int pmac_nvram_init(void);
45 45extern void pmac_pic_init(void);
46extern struct hw_interrupt_type pmac_pic;
47
48void pmac_pic_init(void);
49int pmac_get_irq(struct pt_regs *regs);
50 46
51#endif /* __PMAC_H__ */ 47#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 7acb0546671..3b1a9d4fcbc 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -60,6 +60,7 @@
60#include <asm/system.h> 60#include <asm/system.h>
61#include <asm/pgtable.h> 61#include <asm/pgtable.h>
62#include <asm/io.h> 62#include <asm/io.h>
63#include <asm/kexec.h>
63#include <asm/pci-bridge.h> 64#include <asm/pci-bridge.h>
64#include <asm/ohare.h> 65#include <asm/ohare.h>
65#include <asm/mediabay.h> 66#include <asm/mediabay.h>
@@ -74,8 +75,8 @@
74#include <asm/iommu.h> 75#include <asm/iommu.h>
75#include <asm/smu.h> 76#include <asm/smu.h>
76#include <asm/pmc.h> 77#include <asm/pmc.h>
77#include <asm/mpic.h>
78#include <asm/lmb.h> 78#include <asm/lmb.h>
79#include <asm/udbg.h>
79 80
80#include "pmac.h" 81#include "pmac.h"
81 82
@@ -321,16 +322,6 @@ void __init pmac_setup_arch(void)
321 l2cr_init(); 322 l2cr_init();
322#endif /* CONFIG_PPC32 */ 323#endif /* CONFIG_PPC32 */
323 324
324#ifdef CONFIG_PPC64
325 /* Probe motherboard chipset */
326 /* this is done earlier in setup_arch for 32-bit */
327 pmac_feature_init();
328
329 /* We can NAP */
330 powersave_nap = 1;
331 printk(KERN_INFO "Using native/NAP idle loop\n");
332#endif
333
334#ifdef CONFIG_KGDB 325#ifdef CONFIG_KGDB
335 zs_kgdb_hook(0); 326 zs_kgdb_hook(0);
336#endif 327#endif
@@ -354,7 +345,7 @@ void __init pmac_setup_arch(void)
354 345
355#ifdef CONFIG_SMP 346#ifdef CONFIG_SMP
356 /* Check for Core99 */ 347 /* Check for Core99 */
357 if (find_devices("uni-n") || find_devices("u3")) 348 if (find_devices("uni-n") || find_devices("u3") || find_devices("u4"))
358 smp_ops = &core99_smp_ops; 349 smp_ops = &core99_smp_ops;
359#ifdef CONFIG_PPC32 350#ifdef CONFIG_PPC32
360 else 351 else
@@ -621,35 +612,31 @@ static void __init pmac_init_early(void)
621 * and call ioremap 612 * and call ioremap
622 */ 613 */
623 hpte_init_native(); 614 hpte_init_native();
615#endif
624 616
625 /* Init SCC */ 617 /* Enable early btext debug if requested */
626 if (strstr(cmd_line, "sccdbg")) { 618 if (strstr(cmd_line, "btextdbg")) {
627 sccdbg = 1; 619 udbg_adb_init_early();
628 udbg_init_scc(NULL); 620 register_early_udbg_console();
629 } 621 }
630 622
631 /* Setup interrupt mapping options */ 623 /* Probe motherboard chipset */
632 ppc64_interrupt_controller = IC_OPEN_PIC; 624 pmac_feature_init();
633 625
634 iommu_init_early_u3(); 626 /* We can NAP */
635#endif 627 powersave_nap = 1;
636} 628 printk(KERN_INFO "Using native/NAP idle loop\n");
629
630 /* Initialize debug stuff */
631 udbg_scc_init(!!strstr(cmd_line, "sccdbg"));
632 udbg_adb_init(!!strstr(cmd_line, "btextdbg"));
637 633
638static void __init pmac_progress(char *s, unsigned short hex)
639{
640#ifdef CONFIG_PPC64 634#ifdef CONFIG_PPC64
641 if (sccdbg) { 635 /* Setup interrupt mapping options */
642 udbg_puts(s); 636 ppc64_interrupt_controller = IC_OPEN_PIC;
643 udbg_puts("\n"); 637
644 return; 638 iommu_init_early_dart();
645 }
646#endif 639#endif
647#ifdef CONFIG_BOOTX_TEXT
648 if (boot_text_mapped) {
649 btext_drawstring(s);
650 btext_drawchar('\n');
651 }
652#endif /* CONFIG_BOOTX_TEXT */
653} 640}
654 641
655/* 642/*
@@ -663,35 +650,14 @@ static int pmac_check_legacy_ioport(unsigned int baseport)
663 650
664static int __init pmac_declare_of_platform_devices(void) 651static int __init pmac_declare_of_platform_devices(void)
665{ 652{
666 struct device_node *np, *npp; 653 struct device_node *np;
667 654
668 np = find_devices("uni-n"); 655 np = of_find_node_by_name(NULL, "valkyrie");
669 if (np) {
670 for (np = np->child; np != NULL; np = np->sibling)
671 if (strncmp(np->name, "i2c", 3) == 0) {
672 of_platform_device_create(np, "uni-n-i2c",
673 NULL);
674 break;
675 }
676 }
677 np = find_devices("valkyrie");
678 if (np) 656 if (np)
679 of_platform_device_create(np, "valkyrie", NULL); 657 of_platform_device_create(np, "valkyrie", NULL);
680 np = find_devices("platinum"); 658 np = of_find_node_by_name(NULL, "platinum");
681 if (np) 659 if (np)
682 of_platform_device_create(np, "platinum", NULL); 660 of_platform_device_create(np, "platinum", NULL);
683
684 npp = of_find_node_by_name(NULL, "u3");
685 if (npp) {
686 for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
687 if (strncmp(np->name, "i2c", 3) == 0) {
688 of_platform_device_create(np, "u3-i2c", NULL);
689 of_node_put(np);
690 break;
691 }
692 }
693 of_node_put(npp);
694 }
695 np = of_find_node_by_type(NULL, "smu"); 661 np = of_find_node_by_type(NULL, "smu");
696 if (np) { 662 if (np) {
697 of_platform_device_create(np, "smu", NULL); 663 of_platform_device_create(np, "smu", NULL);
@@ -718,7 +684,7 @@ static int __init pmac_probe(int platform)
718 * occupies having to be broken up so the DART itself is not 684 * occupies having to be broken up so the DART itself is not
719 * part of the cacheable linar mapping 685 * part of the cacheable linar mapping
720 	 * part of the cacheable linear mapping 686 	 * part of the cacheable linear mapping
721 alloc_u3_dart_table(); 687 alloc_dart_table();
722#endif 688#endif
723 689
724#ifdef CONFIG_PMAC_SMU 690#ifdef CONFIG_PMAC_SMU
@@ -734,15 +700,17 @@ static int __init pmac_probe(int platform)
734} 700}
735 701
736#ifdef CONFIG_PPC64 702#ifdef CONFIG_PPC64
737static int pmac_probe_mode(struct pci_bus *bus) 703/* Move that to pci.c */
704static int pmac_pci_probe_mode(struct pci_bus *bus)
738{ 705{
739 struct device_node *node = bus->sysdata; 706 struct device_node *node = bus->sysdata;
740 707
741 /* We need to use normal PCI probing for the AGP bus, 708 /* We need to use normal PCI probing for the AGP bus,
742 since the device for the AGP bridge isn't in the tree. */ 709 * since the device for the AGP bridge isn't in the tree.
743 if (bus->self == NULL && device_is_compatible(node, "u3-agp")) 710 */
711 if (bus->self == NULL && (device_is_compatible(node, "u3-agp") ||
712 device_is_compatible(node, "u4-pcie")))
744 return PCI_PROBE_NORMAL; 713 return PCI_PROBE_NORMAL;
745
746 return PCI_PROBE_DEVTREE; 714 return PCI_PROBE_DEVTREE;
747} 715}
748#endif 716#endif
@@ -756,7 +724,7 @@ struct machdep_calls __initdata pmac_md = {
756 .init_early = pmac_init_early, 724 .init_early = pmac_init_early,
757 .show_cpuinfo = pmac_show_cpuinfo, 725 .show_cpuinfo = pmac_show_cpuinfo,
758 .init_IRQ = pmac_pic_init, 726 .init_IRQ = pmac_pic_init,
759 .get_irq = mpic_get_irq, /* changed later */ 727 .get_irq = NULL, /* changed later */
760 .pcibios_fixup = pmac_pcibios_fixup, 728 .pcibios_fixup = pmac_pcibios_fixup,
761 .restart = pmac_restart, 729 .restart = pmac_restart,
762 .power_off = pmac_power_off, 730 .power_off = pmac_power_off,
@@ -768,12 +736,17 @@ struct machdep_calls __initdata pmac_md = {
768 .calibrate_decr = pmac_calibrate_decr, 736 .calibrate_decr = pmac_calibrate_decr,
769 .feature_call = pmac_do_feature_call, 737 .feature_call = pmac_do_feature_call,
770 .check_legacy_ioport = pmac_check_legacy_ioport, 738 .check_legacy_ioport = pmac_check_legacy_ioport,
771 .progress = pmac_progress, 739 .progress = udbg_progress,
772#ifdef CONFIG_PPC64 740#ifdef CONFIG_PPC64
773 .pci_probe_mode = pmac_probe_mode, 741 .pci_probe_mode = pmac_pci_probe_mode,
774 .idle_loop = native_idle, 742 .idle_loop = native_idle,
775 .enable_pmcs = power4_enable_pmcs, 743 .enable_pmcs = power4_enable_pmcs,
744#ifdef CONFIG_KEXEC
745 .machine_kexec = default_machine_kexec,
746 .machine_kexec_prepare = default_machine_kexec_prepare,
747 .machine_crash_shutdown = default_machine_crash_shutdown,
776#endif 748#endif
749#endif /* CONFIG_PPC64 */
777#ifdef CONFIG_PPC32 750#ifdef CONFIG_PPC32
778 .pcibios_enable_device_hook = pmac_pci_enable_device_hook, 751 .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
779 .pcibios_after_init = pmac_pcibios_after_init, 752 .pcibios_after_init = pmac_pcibios_after_init,
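The ".get_irq = NULL, /* changed later */" line above is deliberate: with two possible interrupt controllers, the handler can only be chosen once init_IRQ has probed the hardware. A sketch of that wiring, using stand-in names for pmac_pic_probe_mpic() and pmac_get_irq() from the pic.c hunks earlier in this patch:

/* Sketch: why pmac_md.get_irq starts out NULL. pmac_pic_init() runs as
 * ppc_md.init_IRQ and installs the real handler once it knows which
 * PIC the machine has.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/machdep.h>

static int example_probe_mpic(void)
{
	return -ENODEV;		/* stub: on success it sets get_irq itself */
}

static int example_oldstyle_get_irq(struct pt_regs *regs)
{
	return -1;		/* stub: no interrupt pending */
}

static void __init example_init_IRQ(void)
{
	if (example_probe_mpic() == 0)
		return;		/* MPIC path installed mpic_get_irq */
	ppc_md.get_irq = example_oldstyle_get_irq;
}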
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index fb2a7c798e8..0df2cdcd805 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -52,8 +52,9 @@
52#include <asm/cacheflush.h> 52#include <asm/cacheflush.h>
53#include <asm/keylargo.h> 53#include <asm/keylargo.h>
54#include <asm/pmac_low_i2c.h> 54#include <asm/pmac_low_i2c.h>
55#include <asm/pmac_pfunc.h>
55 56
56#undef DEBUG 57#define DEBUG
57 58
58#ifdef DEBUG 59#ifdef DEBUG
59#define DBG(fmt...) udbg_printf(fmt) 60#define DBG(fmt...) udbg_printf(fmt)
@@ -62,6 +63,7 @@
62#endif 63#endif
63 64
64extern void __secondary_start_pmac_0(void); 65extern void __secondary_start_pmac_0(void);
66extern int pmac_pfunc_base_install(void);
65 67
66#ifdef CONFIG_PPC32 68#ifdef CONFIG_PPC32
67 69
@@ -361,7 +363,6 @@ static void __init psurge_dual_sync_tb(int cpu_nr)
361 set_dec(tb_ticks_per_jiffy); 363 set_dec(tb_ticks_per_jiffy);
362 /* XXX fixme */ 364 /* XXX fixme */
363 set_tb(0, 0); 365 set_tb(0, 0);
364 last_jiffy_stamp(cpu_nr) = 0;
365 366
366 if (cpu_nr > 0) { 367 if (cpu_nr > 0) {
367 mb(); 368 mb();
@@ -429,15 +430,62 @@ struct smp_ops_t psurge_smp_ops = {
429}; 430};
430#endif /* CONFIG_PPC32 - actually powersurge support */ 431#endif /* CONFIG_PPC32 - actually powersurge support */
431 432
433/*
434 * Core 99 and later support
435 */
436
437static void (*pmac_tb_freeze)(int freeze);
438static unsigned long timebase;
439static int tb_req;
440
441static void smp_core99_give_timebase(void)
442{
443 unsigned long flags;
444
445 local_irq_save(flags);
446
447 while(!tb_req)
448 barrier();
449 tb_req = 0;
450 (*pmac_tb_freeze)(1);
451 mb();
452 timebase = get_tb();
453 mb();
454 while (timebase)
455 barrier();
456 mb();
457 (*pmac_tb_freeze)(0);
458 mb();
459
460 local_irq_restore(flags);
461}
462
463
464static void __devinit smp_core99_take_timebase(void)
465{
466 unsigned long flags;
467
468 local_irq_save(flags);
469
470 tb_req = 1;
471 mb();
472 while (!timebase)
473 barrier();
474 mb();
475 set_tb(timebase >> 32, timebase & 0xffffffff);
476 timebase = 0;
477 mb();
478 set_dec(tb_ticks_per_jiffy/2);
479
480 local_irq_restore(flags);
481}
482
432#ifdef CONFIG_PPC64 483#ifdef CONFIG_PPC64
433/* 484/*
434 * G5s enable/disable the timebase via an i2c-connected clock chip. 485 * G5s enable/disable the timebase via an i2c-connected clock chip.
435 */ 486 */
436static struct device_node *pmac_tb_clock_chip_host; 487static struct pmac_i2c_bus *pmac_tb_clock_chip_host;
437static u8 pmac_tb_pulsar_addr; 488static u8 pmac_tb_pulsar_addr;
438static void (*pmac_tb_freeze)(int freeze);
439static DEFINE_SPINLOCK(timebase_lock);
440static unsigned long timebase;
441 489
442static void smp_core99_cypress_tb_freeze(int freeze) 490static void smp_core99_cypress_tb_freeze(int freeze)
443{ 491{
@@ -447,19 +495,20 @@ static void smp_core99_cypress_tb_freeze(int freeze)
447 /* Strangely, the device-tree says address is 0xd2, but darwin 495 /* Strangely, the device-tree says address is 0xd2, but darwin
448 * accesses 0xd0 ... 496 * accesses 0xd0 ...
449 */ 497 */
450 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined); 498 pmac_i2c_setmode(pmac_tb_clock_chip_host,
451 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host, 499 pmac_i2c_mode_combined);
452 0xd0 | pmac_low_i2c_read, 500 rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
453 0x81, &data, 1); 501 0xd0 | pmac_i2c_read,
502 1, 0x81, &data, 1);
454 if (rc != 0) 503 if (rc != 0)
455 goto bail; 504 goto bail;
456 505
457 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c); 506 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
458 507
459 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub); 508 pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
460 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host, 509 rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
461 0xd0 | pmac_low_i2c_write, 510 0xd0 | pmac_i2c_write,
462 0x81, &data, 1); 511 1, 0x81, &data, 1);
463 512
464 bail: 513 bail:
465 if (rc != 0) { 514 if (rc != 0) {
@@ -475,19 +524,20 @@ static void smp_core99_pulsar_tb_freeze(int freeze)
475 u8 data; 524 u8 data;
476 int rc; 525 int rc;
477 526
478 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined); 527 pmac_i2c_setmode(pmac_tb_clock_chip_host,
479 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host, 528 pmac_i2c_mode_combined);
480 pmac_tb_pulsar_addr | pmac_low_i2c_read, 529 rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
481 0x2e, &data, 1); 530 pmac_tb_pulsar_addr | pmac_i2c_read,
531 1, 0x2e, &data, 1);
482 if (rc != 0) 532 if (rc != 0)
483 goto bail; 533 goto bail;
484 534
485 data = (data & 0x88) | (freeze ? 0x11 : 0x22); 535 data = (data & 0x88) | (freeze ? 0x11 : 0x22);
486 536
487 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub); 537 pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
488 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host, 538 rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
489 pmac_tb_pulsar_addr | pmac_low_i2c_write, 539 pmac_tb_pulsar_addr | pmac_i2c_write,
490 0x2e, &data, 1); 540 1, 0x2e, &data, 1);
491 bail: 541 bail:
492 if (rc != 0) { 542 if (rc != 0) {
493 printk(KERN_ERR "Pulsar Timebase %s rc: %d\n", 543 printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
@@ -496,54 +546,14 @@ static void smp_core99_pulsar_tb_freeze(int freeze)
496 } 546 }
497} 547}
498 548
499 549static void __init smp_core99_setup_i2c_hwsync(int ncpus)
500static void smp_core99_give_timebase(void)
501{
502 /* Open i2c bus for synchronous access */
503 if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
504 panic("Can't open i2c for TB sync !\n");
505
506 spin_lock(&timebase_lock);
507 (*pmac_tb_freeze)(1);
508 mb();
509 timebase = get_tb();
510 spin_unlock(&timebase_lock);
511
512 while (timebase)
513 barrier();
514
515 spin_lock(&timebase_lock);
516 (*pmac_tb_freeze)(0);
517 spin_unlock(&timebase_lock);
518
519 /* Close i2c bus */
520 pmac_low_i2c_close(pmac_tb_clock_chip_host);
521}
522
523
524static void __devinit smp_core99_take_timebase(void)
525{
526 while (!timebase)
527 barrier();
528 spin_lock(&timebase_lock);
529 set_tb(timebase >> 32, timebase & 0xffffffff);
530 timebase = 0;
531 spin_unlock(&timebase_lock);
532}
533
534static void __init smp_core99_setup(int ncpus)
535{ 550{
536 struct device_node *cc = NULL; 551 struct device_node *cc = NULL;
537 struct device_node *p; 552 struct device_node *p;
553 const char *name = NULL;
538 u32 *reg; 554 u32 *reg;
539 int ok; 555 int ok;
540 556
541 /* HW sync only on these platforms */
542 if (!machine_is_compatible("PowerMac7,2") &&
543 !machine_is_compatible("PowerMac7,3") &&
544 !machine_is_compatible("RackMac3,1"))
545 return;
546
547 /* Look for the clock chip */ 557 /* Look for the clock chip */
548 while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) { 558 while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
549 p = of_get_parent(cc); 559 p = of_get_parent(cc);
@@ -552,124 +562,86 @@ static void __init smp_core99_setup(int ncpus)
552 if (!ok) 562 if (!ok)
553 continue; 563 continue;
554 564
565 pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc);
566 if (pmac_tb_clock_chip_host == NULL)
567 continue;
555 reg = (u32 *)get_property(cc, "reg", NULL); 568 reg = (u32 *)get_property(cc, "reg", NULL);
556 if (reg == NULL) 569 if (reg == NULL)
557 continue; 570 continue;
558
559 switch (*reg) { 571 switch (*reg) {
560 case 0xd2: 572 case 0xd2:
561 		if (device_is_compatible(cc, "pulsar-legacy-slewing")) { 573 		if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
562 pmac_tb_freeze = smp_core99_pulsar_tb_freeze; 574 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
563 pmac_tb_pulsar_addr = 0xd2; 575 pmac_tb_pulsar_addr = 0xd2;
564 printk(KERN_INFO "Timebase clock is Pulsar chip\n"); 576 name = "Pulsar";
565 } else if (device_is_compatible(cc, "cy28508")) { 577 } else if (device_is_compatible(cc, "cy28508")) {
566 pmac_tb_freeze = smp_core99_cypress_tb_freeze; 578 pmac_tb_freeze = smp_core99_cypress_tb_freeze;
567 printk(KERN_INFO "Timebase clock is Cypress chip\n"); 579 name = "Cypress";
568 } 580 }
569 break; 581 break;
570 case 0xd4: 582 case 0xd4:
571 pmac_tb_freeze = smp_core99_pulsar_tb_freeze; 583 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
572 pmac_tb_pulsar_addr = 0xd4; 584 pmac_tb_pulsar_addr = 0xd4;
573 printk(KERN_INFO "Timebase clock is Pulsar chip\n"); 585 name = "Pulsar";
574 break; 586 break;
575 } 587 }
576 if (pmac_tb_freeze != NULL) { 588 if (pmac_tb_freeze != NULL)
577 pmac_tb_clock_chip_host = of_get_parent(cc);
578 of_node_put(cc);
579 break; 589 break;
580 }
581 } 590 }
582 if (pmac_tb_freeze == NULL) { 591 if (pmac_tb_freeze != NULL) {
583 smp_ops->give_timebase = smp_generic_give_timebase; 592 /* Open i2c bus for synchronous access */
584 smp_ops->take_timebase = smp_generic_take_timebase; 593 if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) {
594 			printk(KERN_ERR "Failed to open i2c bus for clock"
595 			       " sync, falling back to software sync !\n");
596 goto no_i2c_sync;
597 }
598 printk(KERN_INFO "Processor timebase sync using %s i2c clock\n",
599 name);
600 return;
585 } 601 }
602 no_i2c_sync:
603 pmac_tb_freeze = NULL;
604 pmac_tb_clock_chip_host = NULL;
586} 605}
587 606
588/* nothing to do here, caches are already set up by service processor */ 607
589static inline void __devinit core99_init_caches(int cpu) 608
609/*
610 * Newer G5s use a platform function
611 */
612
613static void smp_core99_pfunc_tb_freeze(int freeze)
590{ 614{
615 struct device_node *cpus;
616 struct pmf_args args;
617
618 cpus = of_find_node_by_path("/cpus");
619 BUG_ON(cpus == NULL);
620 args.count = 1;
621 args.u[0].v = !freeze;
622 pmf_call_function(cpus, "cpu-timebase", &args);
623 of_node_put(cpus);
591} 624}
592 625
593#else /* CONFIG_PPC64 */ 626#else /* CONFIG_PPC64 */
594 627
595/* 628/*
596 * SMP G4 powermacs use a GPIO to enable/disable the timebase. 629 * SMP G4s use a GPIO to enable/disable the timebase.
597 */ 630 */
598 631
599static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */ 632static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */
600 633
601static unsigned int pri_tb_hi, pri_tb_lo; 634static void smp_core99_gpio_tb_freeze(int freeze)
602static unsigned int pri_tb_stamp;
603
604/* not __init, called in sleep/wakeup code */
605void smp_core99_give_timebase(void)
606{ 635{
607 unsigned long flags; 636 if (freeze)
608 unsigned int t; 637 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
609 638 else
610 /* wait for the secondary to be in take_timebase */ 639 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
611 for (t = 100000; t > 0 && !sec_tb_reset; --t)
612 udelay(10);
613 if (!sec_tb_reset) {
614 printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
615 return;
616 }
617
618 /* freeze the timebase and read it */
619 /* disable interrupts so the timebase is disabled for the
620 shortest possible time */
621 local_irq_save(flags);
622 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
623 pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0); 640 pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
624 mb();
625 pri_tb_hi = get_tbu();
626 pri_tb_lo = get_tbl();
627 pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
628 mb();
629
630 /* tell the secondary we're ready */
631 sec_tb_reset = 2;
632 mb();
633
634 /* wait for the secondary to have taken it */
635 /* note: can't use udelay here, since it needs the timebase running */
636 for (t = 10000000; t > 0 && sec_tb_reset; --t)
637 barrier();
638 if (sec_tb_reset)
639 /* XXX BUG_ON here? */
640 printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
641
642 /* Now, restart the timebase by leaving the GPIO to an open collector */
643 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
644 pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
645 local_irq_restore(flags);
646} 641}
647 642
648/* not __init, called in sleep/wakeup code */
649void smp_core99_take_timebase(void)
650{
651 unsigned long flags;
652
653 /* tell the primary we're here */
654 sec_tb_reset = 1;
655 mb();
656
657 /* wait for the primary to set pri_tb_hi/lo */
658 while (sec_tb_reset < 2)
659 mb();
660
661 /* set our stuff the same as the primary */
662 local_irq_save(flags);
663 set_dec(1);
664 set_tb(pri_tb_hi, pri_tb_lo);
665 last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
666 mb();
667 643
668 /* tell the primary we're done */ 644#endif /* !CONFIG_PPC64 */
669 sec_tb_reset = 0;
670 mb();
671 local_irq_restore(flags);
672}
673 645
674/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */ 646/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
675volatile static long int core99_l2_cache; 647volatile static long int core99_l2_cache;
@@ -677,6 +649,7 @@ volatile static long int core99_l3_cache;
677 649
678static void __devinit core99_init_caches(int cpu) 650static void __devinit core99_init_caches(int cpu)
679{ 651{
652#ifndef CONFIG_PPC64
680 if (!cpu_has_feature(CPU_FTR_L2CR)) 653 if (!cpu_has_feature(CPU_FTR_L2CR))
681 return; 654 return;
682 655
@@ -702,30 +675,76 @@ static void __devinit core99_init_caches(int cpu)
702 _set_L3CR(core99_l3_cache); 675 _set_L3CR(core99_l3_cache);
703 printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache); 676 printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
704 } 677 }
678#endif /* !CONFIG_PPC64 */
705} 679}
706 680
707static void __init smp_core99_setup(int ncpus) 681static void __init smp_core99_setup(int ncpus)
708{ 682{
709 struct device_node *cpu; 683#ifdef CONFIG_PPC64
710 u32 *tbprop = NULL; 684
711 int i; 685 /* i2c based HW sync on some G5s */
686 if (machine_is_compatible("PowerMac7,2") ||
687 machine_is_compatible("PowerMac7,3") ||
688 machine_is_compatible("RackMac3,1"))
689 smp_core99_setup_i2c_hwsync(ncpus);
712 690
713 core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */ 691 /* pfunc based HW sync on recent G5s */
714 cpu = of_find_node_by_type(NULL, "cpu"); 692 if (pmac_tb_freeze == NULL) {
715 if (cpu != NULL) { 693 struct device_node *cpus =
716 tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL); 694 of_find_node_by_path("/cpus");
717 if (tbprop) 695 if (cpus &&
718 core99_tb_gpio = *tbprop; 696 get_property(cpus, "platform-cpu-timebase", NULL)) {
719 of_node_put(cpu); 697 pmac_tb_freeze = smp_core99_pfunc_tb_freeze;
698 printk(KERN_INFO "Processor timebase sync using"
699 " platform function\n");
700 }
720 } 701 }
721 702
722 /* XXX should get this from reg properties */ 703#else /* CONFIG_PPC64 */
723 for (i = 1; i < ncpus; ++i) 704
724 smp_hw_index[i] = i; 705 /* GPIO based HW sync on ppc32 Core99 */
725 powersave_nap = 0; 706 if (pmac_tb_freeze == NULL && !machine_is_compatible("MacRISC4")) {
726} 707 struct device_node *cpu;
708 u32 *tbprop = NULL;
709
710 core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */
711 cpu = of_find_node_by_type(NULL, "cpu");
712 if (cpu != NULL) {
713 tbprop = (u32 *)get_property(cpu, "timebase-enable",
714 NULL);
715 if (tbprop)
716 core99_tb_gpio = *tbprop;
717 of_node_put(cpu);
718 }
719 pmac_tb_freeze = smp_core99_gpio_tb_freeze;
720 printk(KERN_INFO "Processor timebase sync using"
721 " GPIO 0x%02x\n", core99_tb_gpio);
722 }
723
724#endif /* CONFIG_PPC64 */
725
726 /* No timebase sync, fallback to software */
727 if (pmac_tb_freeze == NULL) {
728 smp_ops->give_timebase = smp_generic_give_timebase;
729 smp_ops->take_timebase = smp_generic_take_timebase;
730 printk(KERN_INFO "Processor timebase sync using software\n");
731 }
732
733#ifndef CONFIG_PPC64
734 {
735 int i;
736
737 /* XXX should get this from reg properties */
738 for (i = 1; i < ncpus; ++i)
739 smp_hw_index[i] = i;
740 }
727#endif 741#endif
728 742
743 	/* 32-bit SMP can't NAP */
744 if (!machine_is_compatible("MacRISC4"))
745 powersave_nap = 0;
746}
747
729static int __init smp_core99_probe(void) 748static int __init smp_core99_probe(void)
730{ 749{
731 struct device_node *cpus; 750 struct device_node *cpus;
@@ -743,8 +762,19 @@ static int __init smp_core99_probe(void)
743 if (ncpus <= 1) 762 if (ncpus <= 1)
744 return 1; 763 return 1;
745 764
765 /* We need to perform some early initialisations before we can start
766 * setting up SMP as we are running before initcalls
767 */
768 pmac_pfunc_base_install();
769 pmac_i2c_init();
770
771 /* Setup various bits like timebase sync method, ability to nap, ... */
746 smp_core99_setup(ncpus); 772 smp_core99_setup(ncpus);
773
774 /* Install IPIs */
747 mpic_request_ipis(); 775 mpic_request_ipis();
776
777 /* Collect l2cr and l3cr values from CPU 0 */
748 core99_init_caches(0); 778 core99_init_caches(0);
749 779
750 return ncpus; 780 return ncpus;
@@ -753,14 +783,15 @@ static int __init smp_core99_probe(void)
753static void __devinit smp_core99_kick_cpu(int nr) 783static void __devinit smp_core99_kick_cpu(int nr)
754{ 784{
755 unsigned int save_vector; 785 unsigned int save_vector;
756 unsigned long new_vector; 786 unsigned long target, flags;
757 unsigned long flags;
758 volatile unsigned int *vector 787 volatile unsigned int *vector
759 = ((volatile unsigned int *)(KERNELBASE+0x100)); 788 = ((volatile unsigned int *)(KERNELBASE+0x100));
760 789
761 if (nr < 0 || nr > 3) 790 if (nr < 0 || nr > 3)
762 return; 791 return;
763 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346); 792
793 if (ppc_md.progress)
794 ppc_md.progress("smp_core99_kick_cpu", 0x346);
764 795
765 local_irq_save(flags); 796 local_irq_save(flags);
766 local_irq_disable(); 797 local_irq_disable();
@@ -768,14 +799,11 @@ static void __devinit smp_core99_kick_cpu(int nr)
768 /* Save reset vector */ 799 /* Save reset vector */
769 save_vector = *vector; 800 save_vector = *vector;
770 801
771 /* Setup fake reset vector that does 802 /* Setup fake reset vector that does
772 * b __secondary_start_pmac_0 + nr*8 - KERNELBASE 803 * b __secondary_start_pmac_0 + nr*8 - KERNELBASE
773 */ 804 */
774 new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8; 805 target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
775 *vector = 0x48000002 + new_vector - KERNELBASE; 806 create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
776
777 /* flush data cache and inval instruction cache */
778 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
779 807
780 /* Put some life in our friend */ 808 /* Put some life in our friend */
781 pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0); 809 pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
@@ -805,17 +833,25 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
805 mpic_setup_this_cpu(); 833 mpic_setup_this_cpu();
806 834
807 if (cpu_nr == 0) { 835 if (cpu_nr == 0) {
808#ifdef CONFIG_POWER4 836#ifdef CONFIG_PPC64
809 extern void g5_phy_disable_cpu1(void); 837 extern void g5_phy_disable_cpu1(void);
810 838
839 /* Close i2c bus if it was used for tb sync */
840 if (pmac_tb_clock_chip_host) {
841 pmac_i2c_close(pmac_tb_clock_chip_host);
842 pmac_tb_clock_chip_host = NULL;
843 }
844
811 /* If we didn't start the second CPU, we must take 845 /* If we didn't start the second CPU, we must take
812 * it off the bus 846 * it off the bus
813 */ 847 */
814 if (machine_is_compatible("MacRISC4") && 848 if (machine_is_compatible("MacRISC4") &&
815 num_online_cpus() < 2) 849 num_online_cpus() < 2)
816 g5_phy_disable_cpu1(); 850 g5_phy_disable_cpu1();
817#endif /* CONFIG_POWER4 */ 851#endif /* CONFIG_PPC64 */
818 if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349); 852
853 if (ppc_md.progress)
854 ppc_md.progress("core99_setup_cpu 0 done", 0x349);
819 } 855 }
820} 856}
821 857
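The tb_req/timebase handshake introduced above replaces the old spinlocked version with a two-word rendezvous between the boot CPU and the secondary. A condensed, self-contained sketch of the protocol follows; freeze_tb(), read_tb() and write_tb() are stand-ins for the pmac_tb_freeze hook and get_tb()/set_tb(), and the barrier macros are local stand-ins for the kernel's own (both sides run with interrupts off):

/* Sketch of the lock-free timebase handoff used by
 * smp_core99_give_timebase()/smp_core99_take_timebase() above.
 * tb_req flows secondary -> primary, timebase flows back; each mb()
 * orders a flag update against the timebase access next to it.
 */
#define barrier()	__asm__ __volatile__("" : : : "memory")
#define mb()		__asm__ __volatile__("sync" : : : "memory")

static void freeze_tb(int freeze) { }		/* stand-in for (*pmac_tb_freeze)() */
static unsigned long read_tb(void) { return 0; }	/* stand-in for get_tb() */
static void write_tb(unsigned long tb) { }	/* stand-in for set_tb() */

static int tb_req;
static unsigned long timebase;

static void give_timebase(void)		/* primary, irqs off */
{
	while (!tb_req)			/* wait until the secondary is ready */
		barrier();
	tb_req = 0;
	freeze_tb(1);
	mb();
	timebase = read_tb();		/* publish the frozen value */
	mb();
	while (timebase)		/* secondary clears it when done */
		barrier();
	mb();
	freeze_tb(0);
}

static void take_timebase(void)		/* secondary, irqs off */
{
	tb_req = 1;			/* signal readiness */
	mb();
	while (!timebase)		/* wait for the frozen value */
		barrier();
	mb();
	write_tb(timebase);
	timebase = 0;			/* release the primary */
	mb();
}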
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index feb0a94e781..5d9afa1fa02 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -258,15 +258,20 @@ int __init via_calibrate_decr(void)
258 volatile unsigned char __iomem *via; 258 volatile unsigned char __iomem *via;
259 int count = VIA_TIMER_FREQ_6 / 100; 259 int count = VIA_TIMER_FREQ_6 / 100;
260 unsigned int dstart, dend; 260 unsigned int dstart, dend;
261 struct resource rsrc;
261 262
262 vias = find_devices("via-cuda"); 263 vias = of_find_node_by_name(NULL, "via-cuda");
263 if (vias == 0) 264 if (vias == 0)
264 vias = find_devices("via-pmu"); 265 vias = of_find_node_by_name(NULL, "via-pmu");
265 if (vias == 0) 266 if (vias == 0)
266 vias = find_devices("via"); 267 vias = of_find_node_by_name(NULL, "via");
267 if (vias == 0 || vias->n_addrs == 0) 268 if (vias == 0 || of_address_to_resource(vias, 0, &rsrc))
268 return 0; 269 return 0;
269 via = ioremap(vias->addrs[0].address, vias->addrs[0].size); 270 via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
271 if (via == NULL) {
272 printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
273 return 0;
274 }
270 275
271 /* set timer 1 for continuous interrupts */ 276 /* set timer 1 for continuous interrupts */
272 out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT); 277 out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
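One detail in the hunk above: struct resource bounds are inclusive at both ends, hence the "rsrc.end - rsrc.start + 1" length handed to ioremap(). A one-line sketch of that computation, for reference:

/* Inclusive-bounds resource length, as open-coded in
 * via_calibrate_decr() above.
 */
#include <linux/ioport.h>

static unsigned long example_resource_len(const struct resource *r)
{
	return r->end - r->start + 1;	/* both bounds are inclusive */
}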
diff --git a/arch/powerpc/platforms/powermac/udbg_adb.c b/arch/powerpc/platforms/powermac/udbg_adb.c
new file mode 100644
index 00000000000..06c8265c2ba
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/udbg_adb.c
@@ -0,0 +1,221 @@
1#include <linux/config.h>
2#include <linux/string.h>
3#include <linux/kernel.h>
4#include <linux/errno.h>
5#include <linux/bitops.h>
6#include <linux/ptrace.h>
7#include <linux/adb.h>
8#include <linux/pmu.h>
9#include <linux/cuda.h>
10#include <asm/machdep.h>
11#include <asm/io.h>
12#include <asm/page.h>
13#include <asm/xmon.h>
14#include <asm/prom.h>
15#include <asm/bootx.h>
16#include <asm/machdep.h>
17#include <asm/errno.h>
18#include <asm/pmac_feature.h>
19#include <asm/processor.h>
20#include <asm/delay.h>
21#include <asm/btext.h>
22#include <asm/time.h>
23#include <asm/udbg.h>
24
 25/*
 26 * This implementation is "special": it can "patch" the current
 27 * udbg implementation and work on top of it. It must thus be
 28 * initialized last.
 29 */
30
31static void (*udbg_adb_old_putc)(char c);
32static int (*udbg_adb_old_getc)(void);
33static int (*udbg_adb_old_getc_poll)(void);
34
35static enum {
36 input_adb_none,
37 input_adb_pmu,
38 input_adb_cuda,
39} input_type = input_adb_none;
40
41int xmon_wants_key, xmon_adb_keycode;
42
43static inline void udbg_adb_poll(void)
44{
45#ifdef CONFIG_ADB_PMU
46 if (input_type == input_adb_pmu)
47 pmu_poll_adb();
48#endif /* CONFIG_ADB_PMU */
49#ifdef CONFIG_ADB_CUDA
50 if (input_type == input_adb_cuda)
51 cuda_poll();
52#endif /* CONFIG_ADB_CUDA */
53}
54
55#ifdef CONFIG_BOOTX_TEXT
56
57static int udbg_adb_use_btext;
58static int xmon_adb_shiftstate;
59
60static unsigned char xmon_keytab[128] =
61 "asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */
62 "yt123465=97-80]o" /* 0x10 - 0x1f */
63 "u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */
64 "\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
65 "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
66 "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
67
68static unsigned char xmon_shift_keytab[128] =
69 "ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */
70 "YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */
71 "U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */
72 "\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
73 "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
74 "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
75
76static int udbg_adb_local_getc(void)
77{
78 int k, t, on;
79
80 xmon_wants_key = 1;
81 for (;;) {
82 xmon_adb_keycode = -1;
83 t = 0;
84 on = 0;
85 k = -1;
86 do {
87 if (--t < 0) {
88 on = 1 - on;
89 btext_drawchar(on? 0xdb: 0x20);
90 btext_drawchar('\b');
91 t = 200000;
92 }
93 udbg_adb_poll();
94 if (udbg_adb_old_getc_poll)
95 k = udbg_adb_old_getc_poll();
96 } while (k == -1 && xmon_adb_keycode == -1);
97 if (on)
98 btext_drawstring(" \b");
99 if (k != -1)
100 return k;
101 k = xmon_adb_keycode;
102
103 /* test for shift keys */
104 if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
105 xmon_adb_shiftstate = (k & 0x80) == 0;
106 continue;
107 }
108 if (k >= 0x80)
109 continue; /* ignore up transitions */
110 k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
111 if (k != 0)
112 break;
113 }
114 xmon_wants_key = 0;
115 return k;
116}
117#endif /* CONFIG_BOOTX_TEXT */
118
119static int udbg_adb_getc(void)
120{
121#ifdef CONFIG_BOOTX_TEXT
122 if (udbg_adb_use_btext && input_type != input_adb_none)
123 return udbg_adb_local_getc();
124#endif
125 if (udbg_adb_old_getc)
126 return udbg_adb_old_getc();
127 return -1;
128}
129
 130/* getc_poll() is not really used unless you have the xmon-over-modem
 131 * hack, which doesn't concern us here; we just poll the low level
 132 * ADB driver to prevent it from timing out, then call back the
 133 * original poll routine.
 134 */
135static int udbg_adb_getc_poll(void)
136{
137 udbg_adb_poll();
138
139 if (udbg_adb_old_getc_poll)
140 return udbg_adb_old_getc_poll();
141 return -1;
142}
143
144static void udbg_adb_putc(char c)
145{
146#ifdef CONFIG_BOOTX_TEXT
147 if (udbg_adb_use_btext)
148 btext_drawchar(c);
149#endif
150 if (udbg_adb_old_putc)
151 return udbg_adb_old_putc(c);
152}
153
154void udbg_adb_init_early(void)
155{
156#ifdef CONFIG_BOOTX_TEXT
157 if (btext_find_display(1) == 0) {
158 udbg_adb_use_btext = 1;
159 udbg_putc = udbg_adb_putc;
160 }
161#endif
162}
163
164int udbg_adb_init(int force_btext)
165{
166 struct device_node *np;
167
168 /* Capture existing callbacks */
169 udbg_adb_old_putc = udbg_putc;
170 udbg_adb_old_getc = udbg_getc;
171 udbg_adb_old_getc_poll = udbg_getc_poll;
172
173 /* Check if our early init was already called */
174 if (udbg_adb_old_putc == udbg_adb_putc)
175 udbg_adb_old_putc = NULL;
176#ifdef CONFIG_BOOTX_TEXT
177 if (udbg_adb_old_putc == btext_drawchar)
178 udbg_adb_old_putc = NULL;
179#endif
180
181 /* Set ours as output */
182 udbg_putc = udbg_adb_putc;
183 udbg_getc = udbg_adb_getc;
184 udbg_getc_poll = udbg_adb_getc_poll;
185
186#ifdef CONFIG_BOOTX_TEXT
187 /* Check if we should use btext output */
188 if (btext_find_display(force_btext) == 0)
189 udbg_adb_use_btext = 1;
190#endif
191
 192	/* See if there is a keyboard in the device tree with a parent
 193	 * of type "adb". If not, we return a failure, but we keep the
 194	 * btext output set for now.
 195	 */
196 for (np = NULL; (np = of_find_node_by_name(np, "keyboard")) != NULL;) {
197 struct device_node *parent = of_get_parent(np);
198 int found = (parent && strcmp(parent->type, "adb") == 0);
199 of_node_put(parent);
200 if (found)
201 break;
202 }
203 if (np == NULL)
204 return -ENODEV;
205 of_node_put(np);
206
207#ifdef CONFIG_ADB_PMU
208 if (find_via_pmu())
209 input_type = input_adb_pmu;
210#endif
211#ifdef CONFIG_ADB_CUDA
212 if (find_via_cuda())
213 input_type = input_adb_cuda;
214#endif
215
216 /* Same as above: nothing found, keep btext set for output */
217 if (input_type == input_adb_none)
218 return -ENODEV;
219
220 return 0;
221}
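
The header comment of this file describes a capture-and-chain scheme: udbg_adb_init() saves whatever udbg_putc/udbg_getc hooks are installed, puts its own in front, and falls back to the saved ones. A minimal user-space rendering of that pattern; all names here are illustrative, not kernel symbols:

#include <stdio.h>

static void (*putc_hook)(char c);	/* stands in for udbg_putc */
static void (*old_putc)(char c);

static void serial_putc(char c)
{
	fputc(c, stderr);		/* the pre-existing backend */
}

static void wrapped_putc(char c)
{
	fputc(c, stdout);		/* our extra path (btext in the driver) */
	if (old_putc)
		old_putc(c);		/* chain to whoever was there before */
}

int main(void)
{
	putc_hook = serial_putc;

	/* what udbg_adb_init() does with udbg_putc: */
	old_putc = putc_hook;
	if (old_putc == wrapped_putc)	/* guard against double-install */
		old_putc = NULL;
	putc_hook = wrapped_putc;

	putc_hook('O');
	putc_hook('K');
	putc_hook('\n');
	return 0;
}
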
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
new file mode 100644
index 00000000000..e87d53acfb6
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -0,0 +1,165 @@
1/*
 2 * udbg for zilog scc ports as found on Apple PowerMacs
3 *
4 * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <linux/types.h>
13#include <asm/udbg.h>
14#include <asm/processor.h>
15#include <asm/io.h>
16#include <asm/prom.h>
17#include <asm/pmac_feature.h>
18
19extern u8 real_readb(volatile u8 __iomem *addr);
20extern void real_writeb(u8 data, volatile u8 __iomem *addr);
21
22#define SCC_TXRDY 4
23#define SCC_RXRDY 1
24
25static volatile u8 __iomem *sccc;
26static volatile u8 __iomem *sccd;
27
28static void udbg_scc_putc(char c)
29{
30 if (sccc) {
31 while ((in_8(sccc) & SCC_TXRDY) == 0)
32 ;
33 out_8(sccd, c);
34 if (c == '\n')
35 udbg_scc_putc('\r');
36 }
37}
38
39static int udbg_scc_getc_poll(void)
40{
41 if (sccc) {
42 if ((in_8(sccc) & SCC_RXRDY) != 0)
43 return in_8(sccd);
44 else
45 return -1;
46 }
47 return -1;
48}
49
50static int udbg_scc_getc(void)
51{
52 if (sccc) {
53 while ((in_8(sccc) & SCC_RXRDY) == 0)
54 ;
55 return in_8(sccd);
56 }
57 return -1;
58}
59
60static unsigned char scc_inittab[] = {
61 13, 0, /* set baud rate divisor */
62 12, 0,
63 14, 1, /* baud rate gen enable, src=rtxc */
64 11, 0x50, /* clocks = br gen */
65 5, 0xea, /* tx 8 bits, assert DTR & RTS */
66 4, 0x46, /* x16 clock, 1 stop */
67 3, 0xc1, /* rx enable, 8 bits */
68};
69
70void udbg_scc_init(int force_scc)
71{
72 u32 *reg;
73 unsigned long addr;
74 struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
75 struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
76 char *path;
77 int i, x;
78
79 escc = of_find_node_by_name(NULL, "escc");
80 if (escc == NULL)
81 goto bail;
82 macio = of_get_parent(escc);
83 if (macio == NULL)
84 goto bail;
85 path = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
86 if (path != NULL)
87 stdout = of_find_node_by_path(path);
88 for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) {
89 if (ch == stdout)
90 ch_def = of_node_get(ch);
91 if (strcmp(ch->name, "ch-a") == 0)
92 ch_a = of_node_get(ch);
93 }
94 if (ch_def == NULL && !force_scc)
95 goto bail;
96
97 ch = ch_def ? ch_def : ch_a;
98
99 /* Get address within mac-io ASIC */
100 reg = (u32 *)get_property(escc, "reg", NULL);
101 if (reg == NULL)
102 goto bail;
103 addr = reg[0];
104
105 /* Get address of mac-io PCI itself */
106 reg = (u32 *)get_property(macio, "assigned-addresses", NULL);
107 if (reg == NULL)
108 goto bail;
109 addr += reg[2];
110
111 /* Lock the serial port */
112 pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
113 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
114
115
116 /* Setup for 57600 8N1 */
117 if (ch == ch_a)
118 addr += 0x20;
 119	sccc = (volatile u8 * __iomem) ioremap(addr & PAGE_MASK, PAGE_SIZE);
120 sccc += addr & ~PAGE_MASK;
121 sccd = sccc + 0x10;
122
123 mb();
124
125 for (i = 20000; i != 0; --i)
126 x = in_8(sccc);
127 out_8(sccc, 0x09); /* reset A or B side */
128 out_8(sccc, 0xc0);
129 for (i = 0; i < sizeof(scc_inittab); ++i)
130 out_8(sccc, scc_inittab[i]);
131
132 udbg_putc = udbg_scc_putc;
133 udbg_getc = udbg_scc_getc;
134 udbg_getc_poll = udbg_scc_getc_poll;
135
136 udbg_puts("Hello World !\n");
137
138 bail:
139 of_node_put(macio);
140 of_node_put(escc);
141 of_node_put(stdout);
142 of_node_put(ch_def);
143 of_node_put(ch_a);
144}
145
146#ifdef CONFIG_PPC64
147static void udbg_real_scc_putc(char c)
148{
149 while ((real_readb(sccc) & SCC_TXRDY) == 0)
150 ;
151 real_writeb(c, sccd);
152 if (c == '\n')
153 udbg_real_scc_putc('\r');
154}
155
156void udbg_init_pmac_realmode(void)
157{
158 sccc = (volatile u8 __iomem *)0x80013020ul;
159 sccd = (volatile u8 __iomem *)0x80013030ul;
160
161 udbg_putc = udbg_real_scc_putc;
162 udbg_getc = NULL;
163 udbg_getc_poll = NULL;
164}
165#endif /* CONFIG_PPC64 */
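
scc_inittab above relies on the Zilog 8530's indirect register file: a write to the command port selects a write register, and the next write supplies its value, so the init loop can stream the table one byte at a time. A standalone sketch decoding the same table into (register, value) pairs; the per-register annotations follow the table's own comments:

#include <stdio.h>

static const unsigned char scc_inittab[] = {
	13, 0,		/* baud rate divisor, high byte */
	12, 0,		/* baud rate divisor, low byte */
	14, 1,		/* baud rate gen enable, src=rtxc */
	11, 0x50,	/* clocks = br gen */
	 5, 0xea,	/* tx 8 bits, assert DTR & RTS */
	 4, 0x46,	/* x16 clock, 1 stop */
	 3, 0xc1,	/* rx enable, 8 bits */
};

int main(void)
{
	size_t i;

	for (i = 0; i + 1 < sizeof(scc_inittab); i += 2)
		printf("WR%-2u <- 0x%02x\n",
		       (unsigned)scc_inittab[i],
		       (unsigned)scc_inittab[i + 1]);
	return 0;
}
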
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 06d5ef50121..6accdd15550 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -1,5 +1,5 @@
 obj-y			:= pci.o lpar.o hvCall.o nvram.o reconfig.o \
-			   setup.o iommu.o ras.o rtasd.o
+			   setup.o iommu.o ras.o rtasd.o pci_dlpar.o
 obj-$(CONFIG_SMP)	+= smp.o
 obj-$(CONFIG_IBMVIO)	+= vio.o
 obj-$(CONFIG_XICS)	+= xics.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index c8d2a40dc5b..7fbfd16d72b 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1093,6 +1093,15 @@ void eeh_add_device_early(struct device_node *dn)
 }
 EXPORT_SYMBOL_GPL(eeh_add_device_early);
 
+void eeh_add_device_tree_early(struct device_node *dn)
+{
+	struct device_node *sib;
+	for (sib = dn->child; sib; sib = sib->sibling)
+		eeh_add_device_tree_early(sib);
+	eeh_add_device_early(dn);
+}
+EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
+
 /**
  * eeh_add_device_late - perform EEH initialization for the indicated pci device
  * @dev: pci device for which to set up EEH
@@ -1147,6 +1156,23 @@ void eeh_remove_device(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(eeh_remove_device);
 
+void eeh_remove_bus_device(struct pci_dev *dev)
+{
+	eeh_remove_device(dev);
+	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+		struct pci_bus *bus = dev->subordinate;
+		struct list_head *ln;
+		if (!bus)
+			return;
+		for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
+			struct pci_dev *pdev = pci_dev_b(ln);
+			if (pdev)
+				eeh_remove_bus_device(pdev);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
+
 static int proc_eeh_show(struct seq_file *m, void *v)
 {
 	unsigned int cpu;
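
The new eeh_remove_bus_device() walks bus->devices with a raw list_head and pci_dev_b(). The recursion is easier to see with the list_for_each_entry() helper, which already existed at this point; a hedged equivalent (illustrative rewrite, not the committed code):

void eeh_remove_bus_device(struct pci_dev *dev)
{
	struct pci_dev *pdev;

	eeh_remove_device(dev);
	if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE || !dev->subordinate)
		return;

	/* depth-first over the bridge's secondary bus */
	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list)
		eeh_remove_bus_device(pdev);
}
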
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 2043659ea7b..169f9148789 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -436,7 +436,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
 		return;
 	}
 
-	ppci = pdn->data;
+	ppci = PCI_DN(pdn);
 	if (!ppci->iommu_table) {
 		/* Bussubno hasn't been copied yet.
 		 * Do it now because iommu_table_setparms_lpar needs it.
@@ -483,10 +483,10 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
 	 * an already allocated iommu table is found and use that.
 	 */
 
-	while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL)
+	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
 		dn = dn->parent;
 
-	if (dn && dn->data) {
+	if (dn && PCI_DN(dn)) {
 		PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
 	} else {
 		DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev));
@@ -497,7 +497,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
 {
 	int err = NOTIFY_OK;
 	struct device_node *np = node;
-	struct pci_dn *pci = np->data;
+	struct pci_dn *pci = PCI_DN(np);
 
 	switch (action) {
 	case PSERIES_RECONFIG_REMOVE:
@@ -533,7 +533,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
 	 */
 	dn = pci_device_to_OF_node(dev);
 
-	for (pdn = dn; pdn && pdn->data && !PCI_DN(pdn)->iommu_table;
+	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
 	     pdn = pdn->parent) {
 		dma_window = (unsigned int *)
 			get_property(pdn, "ibm,dma-window", NULL);
@@ -552,7 +552,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
 		DBG("Found DMA window, allocating table\n");
 	}
 
-	pci = pdn->data;
+	pci = PCI_DN(pdn);
 	if (!pci->iommu_table) {
 		/* iommu_table_setparms_lpar needs bussubno. */
 		pci->bussubno = pci->phb->bus->number;
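
Every hunk in this file swaps a raw np->data dereference for the PCI_DN() accessor. As declared in <asm/pci-bridge.h> of this vintage the macro is, to the best of my recollection (treat as an assumption), just a typed cast, so the two forms are equivalent but every call site becomes uniform:

/* assumed definition, matching how the hunks use it: */
#define PCI_DN(dn)	((struct pci_dn *)((dn)->data))

/* the parent-table walk from iommu_dev_setup_pSeries(), isolated into a
 * hypothetical helper for illustration: */
static struct iommu_table *find_parent_table(struct device_node *dn)
{
	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;	/* climb toward the host bridge */
	return (dn && PCI_DN(dn)) ? PCI_DN(dn)->iommu_table : NULL;
}
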
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index cf1bc11b334..1fe445ab78a 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -24,6 +24,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/dma-mapping.h>
+#include <linux/console.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/page.h>
@@ -60,7 +61,7 @@ extern void pSeries_find_serial_port(void);
 int vtermno;	/* virtual terminal# for udbg */
 
 #define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
-static void udbg_hvsi_putc(unsigned char c)
+static void udbg_hvsi_putc(char c)
 {
 	/* packet's seqno isn't used anyways */
 	uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c };
@@ -111,7 +112,7 @@ static int udbg_hvsi_getc_poll(void)
 	return ch;
 }
 
-static unsigned char udbg_hvsi_getc(void)
+static int udbg_hvsi_getc(void)
 {
 	int ch;
 	for (;;) {
@@ -127,7 +128,7 @@ static unsigned char udbg_hvsi_getc(void)
 	}
 }
 
-static void udbg_putcLP(unsigned char c)
+static void udbg_putcLP(char c)
 {
 	char buf[16];
 	unsigned long rc;
@@ -172,7 +173,7 @@ static int udbg_getc_pollLP(void)
 	return ch;
 }
 
-static unsigned char udbg_getcLP(void)
+static int udbg_getcLP(void)
 {
 	int ch;
 	for (;;) {
@@ -191,7 +192,7 @@ static unsigned char udbg_getcLP(void)
 /* call this from early_init() for a working debug console on
  * vterm capable LPAR machines
  */
-void udbg_init_debug_lpar(void)
+void __init udbg_init_debug_lpar(void)
 {
 	vtermno = 0;
 	udbg_putc = udbg_putcLP;
@@ -200,63 +201,54 @@ void udbg_init_debug_lpar(void)
 }
 
 /* returns 0 if couldn't find or use /chosen/stdout as console */
-int find_udbg_vterm(void)
+void __init find_udbg_vterm(void)
 {
 	struct device_node *stdout_node;
 	u32 *termno;
 	char *name;
-	int found = 0;
+	int add_console;
 
 	/* find the boot console from /chosen/stdout */
 	if (!of_chosen)
-		return 0;
+		return;
 	name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
 	if (name == NULL)
-		return 0;
+		return;
 	stdout_node = of_find_node_by_path(name);
 	if (!stdout_node)
-		return 0;
-
-	/* now we have the stdout node; figure out what type of device it is. */
+		return;
 	name = (char *)get_property(stdout_node, "name", NULL);
 	if (!name) {
 		printk(KERN_WARNING "stdout node missing 'name' property!\n");
 		goto out;
 	}
+	/* The user has requested a console so this is already set up. */
+	add_console = !strstr(cmd_line, "console=");
 
-	if (strncmp(name, "vty", 3) == 0) {
-		if (device_is_compatible(stdout_node, "hvterm1")) {
-			termno = (u32 *)get_property(stdout_node, "reg", NULL);
-			if (termno) {
-				vtermno = termno[0];
-				udbg_putc = udbg_putcLP;
-				udbg_getc = udbg_getcLP;
-				udbg_getc_poll = udbg_getc_pollLP;
-				found = 1;
-			}
-		} else if (device_is_compatible(stdout_node, "hvterm-protocol")) {
-			termno = (u32 *)get_property(stdout_node, "reg", NULL);
-			if (termno) {
-				vtermno = termno[0];
-				udbg_putc = udbg_hvsi_putc;
-				udbg_getc = udbg_hvsi_getc;
-				udbg_getc_poll = udbg_hvsi_getc_poll;
-				found = 1;
-			}
-		}
-	} else if (strncmp(name, "serial", 6)) {
-		/* XXX fix ISA serial console */
-		printk(KERN_WARNING "serial stdout on LPAR ('%s')! "
-		       "can't print udbg messages\n",
-		       stdout_node->full_name);
-	} else {
-		printk(KERN_WARNING "don't know how to print to stdout '%s'\n",
-		       stdout_node->full_name);
+	/* Check if it's a virtual terminal */
+	if (strncmp(name, "vty", 3) != 0)
+		goto out;
+	termno = (u32 *)get_property(stdout_node, "reg", NULL);
+	if (termno == NULL)
+		goto out;
+	vtermno = termno[0];
+
+	if (device_is_compatible(stdout_node, "hvterm1")) {
+		udbg_putc = udbg_putcLP;
+		udbg_getc = udbg_getcLP;
+		udbg_getc_poll = udbg_getc_pollLP;
+		if (add_console)
+			add_preferred_console("hvc", termno[0] & 0xff, NULL);
+	} else if (device_is_compatible(stdout_node, "hvterm-protocol")) {
+		vtermno = termno[0];
+		udbg_putc = udbg_hvsi_putc;
+		udbg_getc = udbg_hvsi_getc;
+		udbg_getc_poll = udbg_hvsi_getc_poll;
+		if (add_console)
+			add_preferred_console("hvsi", termno[0] & 0xff, NULL);
 	}
-
 out:
 	of_node_put(stdout_node);
-	return found;
 }
 
 void vpa_init(int cpu)
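
udbg_hvsi_putc() in this file hand-rolls the smallest possible HVSI data frame: a header byte (0xff, which I believe matches the hvsi driver's data-packet header, though treat that as an assumption), a length byte covering the whole frame, a 16-bit sequence number the comment says is unused, and the payload. A standalone sketch of the framing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char c = 'X';
	uint8_t packet[] = { 0xff, 5, 0, 0, (uint8_t)c };	/* as in the hunk */

	printf("header=0x%02x len=%u seq=%u payload='%c'\n",
	       (unsigned)packet[0], (unsigned)packet[1],
	       (unsigned)(packet[2] << 8 | packet[3]), packet[4]);
	return 0;
}
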
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
new file mode 100644
index 00000000000..21934784f93
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -0,0 +1,174 @@
1/*
2 * PCI Dynamic LPAR, PCI Hot Plug and PCI EEH recovery code
3 * for RPA-compliant PPC64 platform.
4 * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com>
5 * Copyright (C) 2005 International Business Machines
6 *
7 * Updates, 2005, John Rose <johnrose@austin.ibm.com>
8 * Updates, 2005, Linas Vepstas <linas@austin.ibm.com>
9 *
10 * All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or (at
15 * your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
20 * NON INFRINGEMENT. See the GNU General Public License for more
21 * details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/pci.h>
29#include <asm/pci-bridge.h>
30
31static struct pci_bus *
32find_bus_among_children(struct pci_bus *bus,
33 struct device_node *dn)
34{
35 struct pci_bus *child = NULL;
36 struct list_head *tmp;
37 struct device_node *busdn;
38
39 busdn = pci_bus_to_OF_node(bus);
40 if (busdn == dn)
41 return bus;
42
43 list_for_each(tmp, &bus->children) {
44 child = find_bus_among_children(pci_bus_b(tmp), dn);
45 if (child)
46 break;
47 };
48 return child;
49}
50
51struct pci_bus *
52pcibios_find_pci_bus(struct device_node *dn)
53{
54 struct pci_dn *pdn = dn->data;
55
56 if (!pdn || !pdn->phb || !pdn->phb->bus)
57 return NULL;
58
59 return find_bus_among_children(pdn->phb->bus, dn);
60}
61
62/**
63 * pcibios_remove_pci_devices - remove all devices under this bus
64 *
65 * Remove all of the PCI devices under this bus both from the
66 * linux pci device tree, and from the powerpc EEH address cache.
67 */
68void
69pcibios_remove_pci_devices(struct pci_bus *bus)
70{
71 struct pci_dev *dev, *tmp;
72
73 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
74 eeh_remove_bus_device(dev);
75 pci_remove_bus_device(dev);
76 }
77}
78
79/* Must be called before pci_bus_add_devices */
80void
81pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
82{
83 struct pci_dev *dev;
84
85 list_for_each_entry(dev, &bus->devices, bus_list) {
86 /*
87 * Skip already-present devices (which are on the
88 * global device list.)
89 */
90 if (list_empty(&dev->global_list)) {
91 int i;
92
93 /* Need to setup IOMMU tables */
94 ppc_md.iommu_dev_setup(dev);
95
96 if(fix_bus)
97 pcibios_fixup_device_resources(dev, bus);
98 pci_read_irq_line(dev);
99 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
100 struct resource *r = &dev->resource[i];
101
102 if (r->parent || !r->start || !r->flags)
103 continue;
104 pci_claim_resource(dev, i);
105 }
106 }
107 }
108}
109
110static int
111pcibios_pci_config_bridge(struct pci_dev *dev)
112{
113 u8 sec_busno;
114 struct pci_bus *child_bus;
115 struct pci_dev *child_dev;
116
117 /* Get busno of downstream bus */
118 pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno);
119
120 /* Add to children of PCI bridge dev->bus */
121 child_bus = pci_add_new_bus(dev->bus, dev, sec_busno);
122 if (!child_bus) {
123 printk (KERN_ERR "%s: could not add second bus\n", __FUNCTION__);
124 return -EIO;
125 }
126 sprintf(child_bus->name, "PCI Bus #%02x", child_bus->number);
127
128 pci_scan_child_bus(child_bus);
129
130 list_for_each_entry(child_dev, &child_bus->devices, bus_list) {
131 eeh_add_device_late(child_dev);
132 }
133
134 /* Fixup new pci devices without touching bus struct */
135 pcibios_fixup_new_pci_devices(child_bus, 0);
136
137 /* Make the discovered devices available */
138 pci_bus_add_devices(child_bus);
139 return 0;
140}
141
142/**
143 * pcibios_add_pci_devices - adds new pci devices to bus
144 *
145 * This routine will find and fixup new pci devices under
146 * the indicated bus. This routine presumes that there
147 * might already be some devices under this bridge, so
148 * it carefully tries to add only new devices. (And that
149 * is how this routine differs from other, similar pcibios
150 * routines.)
151 */
152void
153pcibios_add_pci_devices(struct pci_bus * bus)
154{
155 int slotno, num;
156 struct pci_dev *dev;
157 struct device_node *dn = pci_bus_to_OF_node(bus);
158
159 eeh_add_device_tree_early(dn);
160
161 /* pci_scan_slot should find all children */
162 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
163 num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
164 if (num) {
165 pcibios_fixup_new_pci_devices(bus, 1);
166 pci_bus_add_devices(bus);
167 }
168
169 list_for_each_entry(dev, &bus->devices, bus_list) {
170 eeh_add_device_late (dev);
171 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
172 pcibios_pci_config_bridge(dev);
173 }
174}
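
find_bus_among_children() at the top of this file is a plain depth-first search keyed on the bus's device node. The same shape in a generic, standalone form; node and dfs_find() are illustrative stand-ins for pci_bus and the OF-node key:

#include <stdio.h>

struct node {
	const void *key;
	struct node *child;	/* first child  */
	struct node *next;	/* next sibling */
};

static struct node *dfs_find(struct node *n, const void *key)
{
	struct node *c, *hit;

	if (n == NULL)
		return NULL;
	if (n->key == key)	/* pci_bus_to_OF_node(bus) == dn, in effect */
		return n;
	for (c = n->child; c; c = c->next)
		if ((hit = dfs_find(c, key)) != NULL)
			return hit;
	return NULL;
}

int main(void)
{
	const char *target = "grandchild";
	struct node leaf = { target, NULL, NULL };
	struct node kid  = { "child", &leaf, NULL };
	struct node root = { "root", &kid, NULL };

	printf("found: %s\n", dfs_find(&root, target) ? "yes" : "no");
	return 0;
}
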
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index fbd214d68b0..b046bcf7443 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -49,14 +49,14 @@
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/udbg.h>
+#include <asm/firmware.h>
+
+#include "ras.h"
 
 static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
 static DEFINE_SPINLOCK(ras_log_buf_lock);
 
-char mce_data_buf[RTAS_ERROR_LOG_MAX]
-;
-/* This is true if we are using the firmware NMI handler (typically LPAR) */
-extern int fwnmi_active;
+char mce_data_buf[RTAS_ERROR_LOG_MAX];
 
 static int ras_get_sensor_state_token;
 static int ras_check_exception_token;
@@ -280,7 +280,7 @@ static void fwnmi_release_errinfo(void)
 		printk("FWNMI: nmi-interlock failed: %d\n", ret);
 }
 
-void pSeries_system_reset_exception(struct pt_regs *regs)
+int pSeries_system_reset_exception(struct pt_regs *regs)
 {
 	if (fwnmi_active) {
 		struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
@@ -289,6 +289,7 @@ void pSeries_system_reset_exception(struct pt_regs *regs)
 		}
 		fwnmi_release_errinfo();
 	}
+	return 0; /* need to perform reset */
 }
 
 /*
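
pSeries_system_reset_exception() now returns an int, with 0 meaning the generic code must still perform its own reset handling. A sketch of the caller-side contract this implies; this is my reading of the hook change, not the committed generic handler:

void system_reset_exception(struct pt_regs *regs)
{
	if (ppc_md.system_reset_exception &&
	    ppc_md.system_reset_exception(regs))
		return;			/* platform handled it entirely */

	die("System Reset", regs, SIGABRT);	/* generic fallback */
}
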
diff --git a/arch/powerpc/platforms/pseries/ras.h b/arch/powerpc/platforms/pseries/ras.h
new file mode 100644
index 00000000000..0e66b0da55e
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/ras.h
@@ -0,0 +1,9 @@
1#ifndef _PSERIES_RAS_H
2#define _PSERIES_RAS_H
3
4struct pt_regs;
5
6extern int pSeries_system_reset_exception(struct pt_regs *regs);
7extern int pSeries_machine_check_exception(struct pt_regs *regs);
8
9#endif /* _PSERIES_RAS_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 4a465f067ed..8903cf63236 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -56,6 +56,7 @@
 #include <asm/dma.h>
 #include <asm/machdep.h>
 #include <asm/irq.h>
+#include <asm/kexec.h>
 #include <asm/time.h>
 #include <asm/nvram.h>
 #include "xics.h"
@@ -68,6 +69,7 @@
 #include <asm/smp.h>
 
 #include "plpar_wrappers.h"
+#include "ras.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -76,16 +78,9 @@
 #endif
 
 extern void find_udbg_vterm(void);
-extern void system_reset_fwnmi(void);	/* from head.S */
-extern void machine_check_fwnmi(void);	/* from head.S */
-extern void generic_find_legacy_serial_ports(u64 *physport,
-					     unsigned int *default_speed);
 
 int fwnmi_active;  /* TRUE if an FWNMI handler is present */
 
-extern void pSeries_system_reset_exception(struct pt_regs *regs);
-extern int pSeries_machine_check_exception(struct pt_regs *regs);
-
 static void pseries_shared_idle(void);
 static void pseries_dedicated_idle(void);
 
@@ -105,18 +100,22 @@ void pSeries_show_cpuinfo(struct seq_file *m)
 
 /* Initialize firmware assisted non-maskable interrupts if
  * the firmware supports this feature.
- *
  */
 static void __init fwnmi_init(void)
 {
-	int ret;
+	unsigned long system_reset_addr, machine_check_addr;
+
 	int ibm_nmi_register = rtas_token("ibm,nmi-register");
 	if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
 		return;
-	ret = rtas_call(ibm_nmi_register, 2, 1, NULL,
-			__pa((unsigned long)system_reset_fwnmi),
-			__pa((unsigned long)machine_check_fwnmi));
-	if (ret == 0)
+
+	/* If the kernel's not linked at zero we point the firmware at low
+	 * addresses anyway, and use a trampoline to get to the real code. */
+	system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
+	machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;
+
+	if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
+				machine_check_addr))
 		fwnmi_active = 1;
 }
 
@@ -323,15 +322,18 @@ static void __init pSeries_discover_pic(void)
 	ppc64_interrupt_controller = IC_INVALID;
 	for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
 		typep = (char *)get_property(np, "compatible", NULL);
-		if (strstr(typep, "open-pic"))
+		if (strstr(typep, "open-pic")) {
 			ppc64_interrupt_controller = IC_OPEN_PIC;
-		else if (strstr(typep, "ppc-xicp"))
+			break;
+		} else if (strstr(typep, "ppc-xicp")) {
 			ppc64_interrupt_controller = IC_PPC_XIC;
-		else
-			printk("pSeries_discover_pic: failed to recognize"
-			       " interrupt-controller\n");
-		break;
+			break;
+		}
 	}
+	if (ppc64_interrupt_controller == IC_INVALID)
+		printk("pSeries_discover_pic: failed to recognize"
+		       " interrupt-controller\n");
+
 }
 
 static void pSeries_mach_cpu_die(void)
@@ -365,10 +367,7 @@ static int pseries_set_xdabr(unsigned long dabr)
  */
 static void __init pSeries_init_early(void)
 {
-	void *comport;
 	int iommu_off = 0;
-	unsigned int default_speed;
-	u64 physport;
 
 	DBG(" -> pSeries_init_early()\n");
 
@@ -382,17 +381,8 @@ static void __init pSeries_init_early(void)
 			get_property(of_chosen, "linux,iommu-off", NULL));
 	}
 
-	generic_find_legacy_serial_ports(&physport, &default_speed);
-
 	if (platform_is_lpar())
 		find_udbg_vterm();
-	else if (physport) {
-		/* Map the uart for udbg. */
-		comport = (void *)ioremap(physport, 16);
-		udbg_init_uart(comport, default_speed);
-
-		DBG("Hello World !\n");
-	}
 
 	if (firmware_has_feature(FW_FEATURE_DABR))
 		ppc_md.set_dabr = pseries_set_dabr;
@@ -638,5 +628,8 @@ struct machdep_calls __initdata pSeries_md = {
 	.machine_check_exception = pSeries_machine_check_exception,
 #ifdef CONFIG_KEXEC
 	.kexec_cpu_down		= pseries_kexec_cpu_down,
+	.machine_kexec		= default_machine_kexec,
+	.machine_kexec_prepare	= default_machine_kexec_prepare,
+	.machine_crash_shutdown	= default_machine_crash_shutdown,
 #endif
 };
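
The fwnmi_init() rewrite registers handler addresses relative to PHYSICAL_START, leaning on the low-memory trampoline its new comment describes, so the same registration works whether or not the kernel is linked at zero. The arithmetic in isolation, with made-up example values:

#include <stdio.h>

int main(void)
{
	/* hypothetical values; PHYSICAL_START is 0 for a classic kernel */
	unsigned long physical_start = 0x2000000ul;	/* relocated base */
	unsigned long pa_handler     = 0x2003100ul;	/* __pa(handler)  */

	/* what the rewritten fwnmi_init() hands to ibm,nmi-register: */
	printf("registered address: 0x%lx\n", pa_handler - physical_start);
	return 0;
}
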
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 0377decc071..0c0cfa32eb5 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -407,7 +407,7 @@ irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
 			smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
 		}
 #endif
-#ifdef CONFIG_DEBUGGER
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
 		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
 				       &xics_ipi_message[cpu].value)) {
 			mb();