Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/4xx/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/52xx/Makefile | 9
-rw-r--r--  arch/powerpc/platforms/52xx/efika-pci.c | 119
-rw-r--r--  arch/powerpc/platforms/52xx/efika-setup.c | 150
-rw-r--r--  arch/powerpc/platforms/52xx/efika.h | 19
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200.c | 161
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_common.c | 127
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 473
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.h | 53
-rw-r--r--  arch/powerpc/platforms/82xx/mpc82xx_ads.c | 13
-rw-r--r--  arch/powerpc/platforms/83xx/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_mds.c | 2
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_itx.c | 3
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_sys.c | 3
-rw-r--r--  arch/powerpc/platforms/83xx/mpc8360e_pb.c | 2
-rw-r--r--  arch/powerpc/platforms/83xx/mpc83xx.h | 1
-rw-r--r--  arch/powerpc/platforms/83xx/pci.c | 9
-rw-r--r--  arch/powerpc/platforms/85xx/misc.c | 8
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c | 11
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | 10
-rw-r--r--  arch/powerpc/platforms/Makefile | 4
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 14
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 7
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c | 248
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c | 71
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.h | 203
-rw-r--r--  arch/powerpc/platforms/cell/cbe_thermal.c | 228
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 16
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.h | 2
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c | 346
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 1049
-rw-r--r--  arch/powerpc/platforms/cell/iommu.h | 65
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c | 101
-rw-r--r--  arch/powerpc/platforms/cell/pmu.c | 432
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 78
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 415
-rw-r--r--  arch/powerpc/platforms/cell/spu_coredump.c | 81
-rw-r--r--  arch/powerpc/platforms/cell/spu_priv1_mmio.c | 439
-rw-r--r--  arch/powerpc/platforms/cell/spu_priv1_mmio.h | 26
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c | 31
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c | 27
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c | 238
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 536
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c | 51
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 68
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c | 149
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 33
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c | 63
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c | 2
-rw-r--r--  arch/powerpc/platforms/chrp/chrp.h | 1
-rw-r--r--  arch/powerpc/platforms/chrp/pci.c | 9
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Kconfig | 32
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/embedded6xx/linkstation.c | 211
-rw-r--r--  arch/powerpc/platforms/embedded6xx/ls_uart.c | 131
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 94
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/iseries/Makefile | 8
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c | 15
-rw-r--r--  arch/powerpc/platforms/iseries/iommu.c | 16
-rw-r--r--  arch/powerpc/platforms/iseries/ksyms.c | 6
-rw-r--r--  arch/powerpc/platforms/iseries/lpevents.c | 4
-rw-r--r--  arch/powerpc/platforms/iseries/mf.c | 6
-rw-r--r--  arch/powerpc/platforms/iseries/misc.S | 35
-rw-r--r--  arch/powerpc/platforms/iseries/pci.c | 372
-rw-r--r--  arch/powerpc/platforms/iseries/proc.c | 11
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 51
-rw-r--r--  arch/powerpc/platforms/iseries/viopath.c | 7
-rw-r--r--  arch/powerpc/platforms/maple/maple.h | 2
-rw-r--r--  arch/powerpc/platforms/maple/pci.c | 49
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 15
-rw-r--r--  arch/powerpc/platforms/pasemi/pasemi.h | 1
-rw-r--r--  arch/powerpc/platforms/pasemi/pci.c | 8
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c | 18
-rw-r--r--  arch/powerpc/platforms/powermac/backlight.c | 12
-rw-r--r--  arch/powerpc/platforms/powermac/feature.c | 8
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 36
-rw-r--r--  arch/powerpc/platforms/powermac/pmac.h | 2
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 7
-rw-r--r--  arch/powerpc/platforms/ps3/Kconfig | 54
-rw-r--r--  arch/powerpc/platforms/ps3/Makefile | 4
-rw-r--r--  arch/powerpc/platforms/ps3/exports.c | 27
-rw-r--r--  arch/powerpc/platforms/ps3/htab.c | 277
-rw-r--r--  arch/powerpc/platforms/ps3/hvcall.S | 804
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 575
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c | 831
-rw-r--r--  arch/powerpc/platforms/ps3/os-area.c | 259
-rw-r--r--  arch/powerpc/platforms/ps3/platform.h | 68
-rw-r--r--  arch/powerpc/platforms/ps3/repository.c | 840
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c | 173
-rw-r--r--  arch/powerpc/platforms/ps3/smp.c | 158
-rw-r--r--  arch/powerpc/platforms/ps3/spu.c | 613
-rw-r--r--  arch/powerpc/platforms/ps3/time.c | 104
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 13
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_event.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 275
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall.S | 9
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall_inst.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 90
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/pci.c | 35
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/scanlog.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 27
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 200
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 73
112 files changed, 10926 insertions(+), 1991 deletions(-)
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
index ed39d6a3d22a..2f2a13ed7667 100644
--- a/arch/powerpc/platforms/4xx/Kconfig
+++ b/arch/powerpc/platforms/4xx/Kconfig
@@ -179,7 +179,7 @@ config BIOS_FIXUP
 # OAK doesn't exist but wanted to keep this around for any future 403GCX boards
 config 403GCX
 	bool
-	depends OAK
+	depends on OAK
 	default y
 
 config 405EP
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
new file mode 100644
index 000000000000..a46184a0c750
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for 52xx based boards
+#
+ifeq ($(CONFIG_PPC_MERGE),y)
+obj-y += mpc52xx_pic.o mpc52xx_common.o
+endif
+
+obj-$(CONFIG_PPC_EFIKA) += efika-setup.o efika-pci.o
+obj-$(CONFIG_PPC_LITE5200) += lite5200.o
diff --git a/arch/powerpc/platforms/52xx/efika-pci.c b/arch/powerpc/platforms/52xx/efika-pci.c
new file mode 100644
index 000000000000..62e05b2a9227
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika-pci.c
@@ -0,0 +1,119 @@
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/sections.h>
+#include <asm/pci-bridge.h>
+#include <asm/rtas.h>
+
+#include "efika.h"
+
+#ifdef CONFIG_PCI
+/*
+ * Access functions for PCI config space using RTAS calls.
+ */
+static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+			    int len, u32 *val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
+	    | (((bus->number - hose->first_busno) & 0xff) << 16)
+	    | (hose->index << 24);
+	int ret = -1;
+	int rval;
+
+	rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len);
+	*val = ret;
+	return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static int rtas_write_config(struct pci_bus *bus, unsigned int devfn,
+			     int offset, int len, u32 val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
+	    | (((bus->number - hose->first_busno) & 0xff) << 16)
+	    | (hose->index << 24);
+	int rval;
+
+	rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
+			 addr, len, val);
+	return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops rtas_pci_ops = {
+	rtas_read_config,
+	rtas_write_config
+};
+
+void __init efika_pcisetup(void)
+{
+	const int *bus_range;
+	int len;
+	struct pci_controller *hose;
+	struct device_node *root;
+	struct device_node *pcictrl;
+
+	root = of_find_node_by_path("/");
+	if (root == NULL) {
+		printk(KERN_WARNING EFIKA_PLATFORM_NAME
+		       ": Unable to find the root node\n");
+		return;
+	}
+
+	for (pcictrl = NULL;;) {
+		pcictrl = of_get_next_child(root, pcictrl);
+		if ((pcictrl == NULL) || (strcmp(pcictrl->name, "pci") == 0))
+			break;
+	}
+
+	of_node_put(root);
+
+	if (pcictrl == NULL) {
+		printk(KERN_WARNING EFIKA_PLATFORM_NAME
+		       ": Unable to find the PCI bridge node\n");
+		return;
+	}
+
+	bus_range = get_property(pcictrl, "bus-range", &len);
+	if (bus_range == NULL || len < 2 * sizeof(int)) {
+		printk(KERN_WARNING EFIKA_PLATFORM_NAME
+		       ": Can't get bus-range for %s\n", pcictrl->full_name);
+		return;
+	}
+
+	if (bus_range[1] == bus_range[0])
+		printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI bus %d",
+		       bus_range[0]);
+	else
+		printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI buses %d..%d",
+		       bus_range[0], bus_range[1]);
+	printk(" controlled by %s\n", pcictrl->full_name);
+	printk("\n");
+
+	hose = pcibios_alloc_controller();
+	if (!hose) {
+		printk(KERN_WARNING EFIKA_PLATFORM_NAME
+		       ": Can't allocate PCI controller structure for %s\n",
+		       pcictrl->full_name);
+		return;
+	}
+
+	hose->arch_data = of_node_get(pcictrl);
+	hose->first_busno = bus_range[0];
+	hose->last_busno = bus_range[1];
+	hose->ops = &rtas_pci_ops;
+
+	pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+}
+
+#else
+void __init efika_pcisetup(void)
+{}
+#endif
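The RTAS accessors above pack (controller index, relative bus, devfn, offset) into a single config-space address word: offset in bits 0-7, devfn in bits 8-15, bus in bits 16-23, controller index from bit 24 up. A minimal standalone sketch of that packing, using only the shifts visible in the patch (the bus/device/offset sample values below are invented for the demonstration):

#include <stdio.h>

/* Pack fields the way the Efika RTAS config accessors do. */
static unsigned long rtas_config_addr(int hose_index, int rel_bus,
				      unsigned int devfn, int offset)
{
	return (offset & 0xff) | ((devfn & 0xff) << 8)
	    | ((rel_bus & 0xff) << 16) | ((unsigned long)hose_index << 24);
}

int main(void)
{
	/* e.g. controller 0, bus 0, device 12 function 0, offset 0 */
	unsigned int devfn = (12 << 3) | 0;	/* PCI_DEVFN(12, 0) */
	printf("addr = 0x%08lx\n", rtas_config_addr(0, 0, devfn, 0));
	return 0;
}

Applying the same masks in reverse recovers the individual fields from the word.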
diff --git a/arch/powerpc/platforms/52xx/efika-setup.c b/arch/powerpc/platforms/52xx/efika-setup.c
new file mode 100644
index 000000000000..110c980ed1e0
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika-setup.c
@@ -0,0 +1,150 @@
+/*
+ *
+ * Efika 5K2 platform setup
+ * Some code really inspired from the lite5200b platform.
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/utsrelease.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/rtas.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+#include "efika.h"
+
+static void efika_show_cpuinfo(struct seq_file *m)
+{
+	struct device_node *root;
+	const char *revision = NULL;
+	const char *codegendescription = NULL;
+	const char *codegenvendor = NULL;
+
+	root = of_find_node_by_path("/");
+	if (root) {
+		revision = get_property(root, "revision", NULL);
+		codegendescription =
+		    get_property(root, "CODEGEN,description", NULL);
+		codegenvendor = get_property(root, "CODEGEN,vendor", NULL);
+
+		of_node_put(root);
+	}
+
+	if (codegendescription)
+		seq_printf(m, "machine\t\t: %s\n", codegendescription);
+	else
+		seq_printf(m, "machine\t\t: Efika\n");
+
+	if (revision)
+		seq_printf(m, "revision\t: %s\n", revision);
+
+	if (codegenvendor)
+		seq_printf(m, "vendor\t\t: %s\n", codegenvendor);
+
+	of_node_put(root);
+}
+
+static void __init efika_setup_arch(void)
+{
+	rtas_initialize();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_below_start_ok = 1;
+
+	if (initrd_start)
+		ROOT_DEV = Root_RAM0;
+	else
+#endif
+		ROOT_DEV = Root_SDA2;	/* sda2 (sda1 is for the kernel) */
+
+	efika_pcisetup();
+
+	if (ppc_md.progress)
+		ppc_md.progress("Linux/PPC " UTS_RELEASE " running on Efika ;-)\n", 0x0);
+}
+
+static void __init efika_init(void)
+{
+	struct device_node *np;
+	struct device_node *cnp = NULL;
+	const u32 *base;
+
+	/* Find every child of the SOC node and add it to of_platform */
+	np = of_find_node_by_name(NULL, "builtin");
+	if (np) {
+		char name[BUS_ID_SIZE];
+		while ((cnp = of_get_next_child(np, cnp))) {
+			strcpy(name, cnp->name);
+
+			base = get_property(cnp, "reg", NULL);
+			if (base == NULL)
+				continue;
+
+			snprintf(name + strlen(name), BUS_ID_SIZE, "@%x", *base);
+			of_platform_device_create(cnp, name, NULL);
+
+			printk(KERN_INFO EFIKA_PLATFORM_NAME" : Added %s (type '%s' at '%s') to the known devices\n", name, cnp->type, cnp->full_name);
+		}
+	}
+
+	if (ppc_md.progress)
+		ppc_md.progress("  Have fun with your Efika!    ", 0x7777);
+}
+
+static int __init efika_probe(void)
+{
+	char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+					  "model", NULL);
+
+	if (model == NULL)
+		return 0;
+	if (strcmp(model, "EFIKA5K2"))
+		return 0;
+
+	ISA_DMA_THRESHOLD = ~0L;
+	DMA_MODE_READ = 0x44;
+	DMA_MODE_WRITE = 0x48;
+
+	return 1;
+}
+
+define_machine(efika)
+{
+	.name = EFIKA_PLATFORM_NAME,
+	.probe = efika_probe,
+	.setup_arch = efika_setup_arch,
+	.init = efika_init,
+	.show_cpuinfo = efika_show_cpuinfo,
+	.init_IRQ = mpc52xx_init_irq,
+	.get_irq = mpc52xx_get_irq,
+	.restart = rtas_restart,
+	.power_off = rtas_power_off,
+	.halt = rtas_halt,
+	.set_rtc_time = rtas_set_rtc_time,
+	.get_rtc_time = rtas_get_rtc_time,
+	.progress = rtas_progress,
+	.get_boot_time = rtas_get_boot_time,
+	.calibrate_decr = generic_calibrate_decr,
+	.phys_mem_access_prot = pci_phys_mem_access_prot,
+};
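efika_init() above names each of_platform device as "name@reg", e.g. a node called "serial" with a reg of 0xf0003000 becomes "serial@f0003000". A self-contained sketch of that string construction (node name and reg value are invented for the illustration; note the sketch clamps the remaining buffer space, which is slightly stricter than the snprintf call in the patch):

#include <stdio.h>
#include <string.h>

#define BUS_ID_SIZE 20	/* matches the 2.6-era kernel constant */

int main(void)
{
	char name[BUS_ID_SIZE];
	unsigned int reg = 0xf0003000;	/* hypothetical "reg" property */

	strcpy(name, "serial");	/* hypothetical node name */
	snprintf(name + strlen(name), BUS_ID_SIZE - strlen(name), "@%x", reg);
	printf("%s\n", name);	/* -> serial@f0003000 */
	return 0;
}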
diff --git a/arch/powerpc/platforms/52xx/efika.h b/arch/powerpc/platforms/52xx/efika.h
new file mode 100644
index 000000000000..2f060fd097d7
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika.h
@@ -0,0 +1,19 @@
+/*
+ * Efika 5K2 platform setup - Header file
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef __ARCH_POWERPC_EFIKA__
+#define __ARCH_POWERPC_EFIKA__
+
+#define EFIKA_PLATFORM_NAME "Efika"
+
+extern void __init efika_pcisetup(void);
+
+#endif
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
new file mode 100644
index 000000000000..0f21bab33f6c
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -0,0 +1,161 @@
+/*
+ * Freescale Lite5200 board support
+ *
+ * Written by: Grant Likely <grant.likely@secretlab.ca>
+ *
+ * Copyright (C) Secret Lab Technologies Ltd. 2006. All rights reserved.
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Description:
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <sysdev/fsl_soc.h>
+#include <asm/of_platform.h>
+
+#include <asm/mpc52xx.h>
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+
+static void __init
+lite52xx_setup_cpu(void)
+{
+	struct mpc52xx_gpio __iomem *gpio;
+	u32 port_config;
+
+	/* Map zones */
+	gpio = mpc52xx_find_and_map("mpc52xx-gpio");
+	if (!gpio) {
+		printk(KERN_ERR __FILE__ ": "
+		       "Error while mapping GPIO register for port config. "
+		       "Expect some abnormal behavior\n");
+		goto error;
+	}
+
+	/* Set port config */
+	port_config = in_be32(&gpio->port_config);
+
+	port_config &= ~0x00800000;	/* 48 MHz internal, pin is GPIO */
+
+	port_config &= ~0x00007000;	/* USB port : Differential mode */
+	port_config |= 0x00001000;	/* USB 1 only */
+
+	port_config &= ~0x03000000;	/* ATA CS is on csb_4/5 */
+	port_config |= 0x01000000;
+
+	pr_debug("port_config: old:%x new:%x\n",
+		 in_be32(&gpio->port_config), port_config);
+	out_be32(&gpio->port_config, port_config);
+
+	/* Unmap zone */
+error:
+	iounmap(gpio);
+}
+
+static void __init lite52xx_setup_arch(void)
+{
+	struct device_node *np;
+
+	if (ppc_md.progress)
+		ppc_md.progress("lite52xx_setup_arch()", 0);
+
+	np = of_find_node_by_type(NULL, "cpu");
+	if (np) {
+		unsigned int *fp =
+		    (int *)get_property(np, "clock-frequency", NULL);
+		if (fp != 0)
+			loops_per_jiffy = *fp / HZ;
+		else
+			loops_per_jiffy = 50000000 / HZ;
+		of_node_put(np);
+	}
+
+	/* CPU & Port mux setup */
+	mpc52xx_setup_cpu();	/* Generic */
+	lite52xx_setup_cpu();	/* Platform specific */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start)
+		ROOT_DEV = Root_RAM0;
+	else
+#endif
+#ifdef CONFIG_ROOT_NFS
+		ROOT_DEV = Root_NFS;
+#else
+		ROOT_DEV = Root_HDA1;
+#endif
+
+}
+
+void lite52xx_show_cpuinfo(struct seq_file *m)
+{
+	struct device_node *np = of_find_all_nodes(NULL);
+	const char *model = NULL;
+
+	if (np)
+		model = get_property(np, "model", NULL);
+
+	seq_printf(m, "vendor\t\t: Freescale Semiconductor\n");
+	seq_printf(m, "machine\t\t: %s\n", model ? model : "unknown");
+
+	of_node_put(np);
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init lite52xx_probe(void)
+{
+	unsigned long node = of_get_flat_dt_root();
+	const char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+	if (!of_flat_dt_is_compatible(node, "lite52xx"))
+		return 0;
+	pr_debug("%s board w/ mpc52xx found\n", model ? model : "unknown");
+
+	return 1;
+}
+
+define_machine(lite52xx) {
+	.name		= "lite52xx",
+	.probe		= lite52xx_probe,
+	.setup_arch	= lite52xx_setup_arch,
+	.init		= mpc52xx_declare_of_platform_devices,
+	.init_IRQ	= mpc52xx_init_irq,
+	.get_irq	= mpc52xx_get_irq,
+	.show_cpuinfo	= lite52xx_show_cpuinfo,
+	.calibrate_decr	= generic_calibrate_decr,
+};
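The GPIO port_config update in lite52xx_setup_cpu() is a classic read-modify-write: clear a field with &= ~mask, then set the wanted value with |=. A host-side sketch of the same field edits, with the mask values copied from the patch (the register is modeled as a plain variable here, and the starting value is hypothetical, not a documented reset value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t port_config = 0x03807000;	/* hypothetical initial value */

	port_config &= ~0x00800000;	/* 48 MHz internal, pin is GPIO */
	port_config &= ~0x00007000;	/* USB port: differential mode */
	port_config |=  0x00001000;	/* USB 1 only */
	port_config &= ~0x03000000;	/* clear the ATA CS field... */
	port_config |=  0x01000000;	/* ...then select csb_4/5 */

	printf("port_config = 0x%08x\n", port_config);
	return 0;
}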
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
new file mode 100644
index 000000000000..cc40889074bd
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -0,0 +1,127 @@
+/*
+ *
+ * Utility functions for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+
+void __iomem *
+mpc52xx_find_and_map(const char *compatible)
+{
+	struct device_node *ofn;
+	const u32 *regaddr_p;
+	u64 regaddr64, size64;
+
+	ofn = of_find_compatible_node(NULL, NULL, compatible);
+	if (!ofn)
+		return NULL;
+
+	regaddr_p = of_get_address(ofn, 0, &size64, NULL);
+	if (!regaddr_p) {
+		of_node_put(ofn);
+		return NULL;
+	}
+
+	regaddr64 = of_translate_address(ofn, regaddr_p);
+
+	of_node_put(ofn);
+
+	return ioremap((u32)regaddr64, (u32)size64);
+}
+EXPORT_SYMBOL(mpc52xx_find_and_map);
+
+
+/**
+ * mpc52xx_find_ipb_freq - Find the IPB bus frequency for a device
+ * @node:	device node
+ *
+ * Returns IPB bus frequency, or 0 if the bus frequency cannot be found.
+ */
+unsigned int
+mpc52xx_find_ipb_freq(struct device_node *node)
+{
+	struct device_node *np;
+	const unsigned int *p_ipb_freq = NULL;
+
+	of_node_get(node);
+	while (node) {
+		p_ipb_freq = get_property(node, "bus-frequency", NULL);
+		if (p_ipb_freq)
+			break;
+
+		np = of_get_parent(node);
+		of_node_put(node);
+		node = np;
+	}
+	if (node)
+		of_node_put(node);
+
+	return p_ipb_freq ? *p_ipb_freq : 0;
+}
+EXPORT_SYMBOL(mpc52xx_find_ipb_freq);
+
+
+void __init
+mpc52xx_setup_cpu(void)
+{
+	struct mpc52xx_cdm __iomem *cdm;
+	struct mpc52xx_xlb __iomem *xlb;
+
+	/* Map zones */
+	cdm = mpc52xx_find_and_map("mpc52xx-cdm");
+	xlb = mpc52xx_find_and_map("mpc52xx-xlb");
+
+	if (!cdm || !xlb) {
+		printk(KERN_ERR __FILE__ ": "
+		       "Error while mapping CDM/XLB during mpc52xx_setup_cpu. "
+		       "Expect some abnormal behavior\n");
+		goto unmap_regs;
+	}
+
+	/* Use internal 48 MHz */
+	out_8(&cdm->ext_48mhz_en, 0x00);
+	out_8(&cdm->fd_enable, 0x01);
+	if (in_be32(&cdm->rstcfg) & 0x40)	/* Assumes 33 MHz clock */
+		out_be16(&cdm->fd_counters, 0x0001);
+	else
+		out_be16(&cdm->fd_counters, 0x5555);
+
+	/* Configure the XLB Arbiter priorities */
+	out_be32(&xlb->master_pri_enable, 0xff);
+	out_be32(&xlb->master_priority, 0x11111111);
+
+	/* Disable XLB pipelining */
+	/* (cf. erratum 292. We could do this only just before ATA PIO
+	    transaction and re-enable it afterwards ...) */
+	out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
+
+	/* Unmap zones */
+unmap_regs:
+	if (cdm) iounmap(cdm);
+	if (xlb) iounmap(xlb);
+}
+
+void __init
+mpc52xx_declare_of_platform_devices(void)
+{
+	/* Find every child of the SOC node and add it to of_platform */
+	if (of_platform_bus_probe(NULL, NULL, NULL))
+		printk(KERN_ERR __FILE__ ": "
+		       "Error while probing of_platform bus\n");
+}
+
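mpc52xx_find_ipb_freq() walks from a device node up toward the root until it finds a node carrying a bus-frequency property. A self-contained model of that walk (the mock node type and the tiny tree below are invented for the demo; the kernel version additionally manages of_node reference counts as it climbs):

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	unsigned int bus_frequency;	/* 0 = property absent */
};

/* Climb the tree until some ancestor (or the node itself) has a value. */
static unsigned int find_ipb_freq(struct node *n)
{
	while (n) {
		if (n->bus_frequency)
			return n->bus_frequency;
		n = n->parent;
	}
	return 0;
}

int main(void)
{
	struct node soc  = { "soc5200", NULL, 132000000 };
	struct node uart = { "serial",  &soc, 0 };

	printf("ipb freq = %u\n", find_ipb_freq(&uart));	/* 132000000 */
	return 0;
}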
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
new file mode 100644
index 000000000000..cd91a6c3aafa
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -0,0 +1,473 @@
+/*
+ *
+ * Programmable Interrupt Controller functions for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * Based on the code from the 2.4 kernel by
+ * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
+ *
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Montavista Software, Inc
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/hardirq.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/mpc52xx.h>
+#include "mpc52xx_pic.h"
+
+/*
+ *
+*/
+
+static struct mpc52xx_intr __iomem *intr;
+static struct mpc52xx_sdma __iomem *sdma;
+static struct irq_host *mpc52xx_irqhost = NULL;
+
+static unsigned char mpc52xx_map_senses[4] = {
+	IRQ_TYPE_LEVEL_HIGH,
+	IRQ_TYPE_EDGE_RISING,
+	IRQ_TYPE_EDGE_FALLING,
+	IRQ_TYPE_LEVEL_LOW,
+};
+
+/*
+ *
+*/
+
+static inline void io_be_setbit(u32 __iomem *addr, int bitno)
+{
+	out_be32(addr, in_be32(addr) | (1 << bitno));
+}
+
+static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
+{
+	out_be32(addr, in_be32(addr) & ~(1 << bitno));
+}
+
+/*
+ * IRQ[0-3] interrupt irq_chip
+*/
+
+static void mpc52xx_extirq_mask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_clrbit(&intr->ctrl, 11 - l2irq);
+}
+
+static void mpc52xx_extirq_unmask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&intr->ctrl, 11 - l2irq);
+}
+
+static void mpc52xx_extirq_ack(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&intr->ctrl, 27 - l2irq);
+}
+
+static struct irq_chip mpc52xx_extirq_irqchip = {
+	.typename = " MPC52xx IRQ[0-3] ",
+	.mask = mpc52xx_extirq_mask,
+	.unmask = mpc52xx_extirq_unmask,
+	.ack = mpc52xx_extirq_ack,
+};
+
+/*
+ * Main interrupt irq_chip
+*/
+
+static void mpc52xx_main_mask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&intr->main_mask, 15 - l2irq);
+}
+
+static void mpc52xx_main_unmask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_clrbit(&intr->main_mask, 15 - l2irq);
+}
+
+static struct irq_chip mpc52xx_main_irqchip = {
+	.typename = "MPC52xx Main",
+	.mask = mpc52xx_main_mask,
+	.mask_ack = mpc52xx_main_mask,
+	.unmask = mpc52xx_main_unmask,
+};
+
+/*
+ * Peripherals interrupt irq_chip
+*/
+
+static void mpc52xx_periph_mask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&intr->per_mask, 31 - l2irq);
+}
+
+static void mpc52xx_periph_unmask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_clrbit(&intr->per_mask, 31 - l2irq);
+}
+
+static struct irq_chip mpc52xx_periph_irqchip = {
+	.typename = "MPC52xx Peripherals",
+	.mask = mpc52xx_periph_mask,
+	.mask_ack = mpc52xx_periph_mask,
+	.unmask = mpc52xx_periph_unmask,
+};
+
+/*
+ * SDMA interrupt irq_chip
+*/
+
+static void mpc52xx_sdma_mask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&sdma->IntMask, l2irq);
+}
+
+static void mpc52xx_sdma_unmask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_clrbit(&sdma->IntMask, l2irq);
+}
+
+static void mpc52xx_sdma_ack(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	out_be32(&sdma->IntPend, 1 << l2irq);
+}
+
+static struct irq_chip mpc52xx_sdma_irqchip = {
+	.typename = "MPC52xx SDMA",
+	.mask = mpc52xx_sdma_mask,
+	.unmask = mpc52xx_sdma_unmask,
+	.ack = mpc52xx_sdma_ack,
+};
+
+/*
+ * irq_host
+*/
+
+static int mpc52xx_irqhost_match(struct irq_host *h, struct device_node *node)
+{
+	pr_debug("%s: node=%p\n", __func__, node);
+	return mpc52xx_irqhost->host_data == node;
+}
+
+static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
+				 u32 *intspec, unsigned int intsize,
+				 irq_hw_number_t *out_hwirq,
+				 unsigned int *out_flags)
+{
+	int intrvect_l1;
+	int intrvect_l2;
+	int intrvect_type;
+	int intrvect_linux;
+
+	if (intsize != 3)
+		return -1;
+
+	intrvect_l1 = (int)intspec[0];
+	intrvect_l2 = (int)intspec[1];
+	intrvect_type = (int)intspec[2];
+
+	intrvect_linux =
+	    (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK;
+	intrvect_linux |=
+	    (intrvect_l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK;
+
+	pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
+		 intrvect_l2);
+
+	*out_hwirq = intrvect_linux;
+	*out_flags = mpc52xx_map_senses[intrvect_type];
+
+	return 0;
+}
+
+/*
+ * This function retrieves the correct IRQ type out
+ * of the MPC regs.
+ * Only external IRQs need this.
+*/
+static int mpc52xx_irqx_gettype(int irq)
+{
+	int type;
+	u32 ctrl_reg;
+
+	ctrl_reg = in_be32(&intr->ctrl);
+	type = (ctrl_reg >> (22 - irq * 2)) & 0x3;
+
+	return mpc52xx_map_senses[type];
+}
+
+static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
+			       irq_hw_number_t irq)
+{
+	int l1irq;
+	int l2irq;
+	struct irq_chip *good_irqchip;
+	void *good_handle;
+	int type;
+
+	l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	/*
+	 * Most of our IRQs will be level low.
+	 * Only external IRQs on some platforms may be others.
+	 */
+	type = IRQ_TYPE_LEVEL_LOW;
+
+	switch (l1irq) {
+	case MPC52xx_IRQ_L1_CRIT:
+		pr_debug("%s: Critical. l2=%x\n", __func__, l2irq);
+
+		BUG_ON(l2irq != 0);
+
+		type = mpc52xx_irqx_gettype(l2irq);
+		good_irqchip = &mpc52xx_extirq_irqchip;
+		break;
+
+	case MPC52xx_IRQ_L1_MAIN:
+		pr_debug("%s: Main IRQ[1-3] l2=%x\n", __func__, l2irq);
+
+		if ((l2irq >= 1) && (l2irq <= 3)) {
+			type = mpc52xx_irqx_gettype(l2irq);
+			good_irqchip = &mpc52xx_extirq_irqchip;
+		} else {
+			good_irqchip = &mpc52xx_main_irqchip;
+		}
+		break;
+
+	case MPC52xx_IRQ_L1_PERP:
+		pr_debug("%s: Peripherals. l2=%x\n", __func__, l2irq);
+		good_irqchip = &mpc52xx_periph_irqchip;
+		break;
+
+	case MPC52xx_IRQ_L1_SDMA:
+		pr_debug("%s: SDMA. l2=%x\n", __func__, l2irq);
+		good_irqchip = &mpc52xx_sdma_irqchip;
+		break;
+
+	default:
+		pr_debug("%s: Error, unknown L1 IRQ (0x%x)\n", __func__, l1irq);
+		printk(KERN_ERR "Unknown IRQ!\n");
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_EDGE_RISING:
+		good_handle = handle_edge_irq;
+		break;
+	default:
+		good_handle = handle_level_irq;
+	}
+
+	set_irq_chip_and_handler(virq, good_irqchip, good_handle);
+
+	pr_debug("%s: virq=%x, hw=%x. type=%x\n", __func__, virq,
+		 (int)irq, type);
+
+	return 0;
+}
+
+static struct irq_host_ops mpc52xx_irqhost_ops = {
+	.match = mpc52xx_irqhost_match,
+	.xlate = mpc52xx_irqhost_xlate,
+	.map = mpc52xx_irqhost_map,
+};
+
+/*
+ * init (public)
+*/
+
+void __init mpc52xx_init_irq(void)
+{
+	u32 intr_ctrl;
+	struct device_node *picnode;
+
+	/* Remap the necessary zones */
+	picnode = of_find_compatible_node(NULL, NULL, "mpc52xx-pic");
+
+	intr = mpc52xx_find_and_map("mpc52xx-pic");
+	if (!intr)
+		panic(__FILE__ ": find_and_map failed on 'mpc52xx-pic'. "
+		      "Check node !");
+
+	sdma = mpc52xx_find_and_map("mpc52xx-bestcomm");
+	if (!sdma)
+		panic(__FILE__ ": find_and_map failed on 'mpc52xx-bestcomm'. "
+		      "Check node !");
+
+	/* Disable all interrupt sources. */
+	out_be32(&sdma->IntPend, 0xffffffff);	/* 1 means clear pending */
+	out_be32(&sdma->IntMask, 0xffffffff);	/* 1 means disabled */
+	out_be32(&intr->per_mask, 0x7ffffc00);	/* 1 means disabled */
+	out_be32(&intr->main_mask, 0x00010fff);	/* 1 means disabled */
+	intr_ctrl = in_be32(&intr->ctrl);
+	intr_ctrl &= 0x00ff0000;	/* Keeps IRQ[0-3] config */
+	intr_ctrl |= 0x0f000000 |	/* clear IRQ 0-3 */
+		     0x00001000 |	/* MEE master external enable */
+		     0x00000000 |	/* 0 means disable IRQ 0-3 */
+		     0x00000001;	/* CEb route critical normally */
+	out_be32(&intr->ctrl, intr_ctrl);
+
+	/* Zero a bunch of the priority settings. */
+	out_be32(&intr->per_pri1, 0);
+	out_be32(&intr->per_pri2, 0);
+	out_be32(&intr->per_pri3, 0);
+	out_be32(&intr->main_pri1, 0);
+	out_be32(&intr->main_pri2, 0);
+
+	/*
+	 * As last step, add an irq host to translate the real
+	 * hw irq information provided by the ofw to linux virq
+	 */
+
+	mpc52xx_irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
+					 MPC52xx_IRQ_HIGHTESTHWIRQ,
+					 &mpc52xx_irqhost_ops, -1);
+
+	if (!mpc52xx_irqhost)
+		panic(__FILE__ ": Cannot allocate the IRQ host\n");
+
+	mpc52xx_irqhost->host_data = picnode;
+	printk(KERN_INFO "MPC52xx PIC is up and running!\n");
+}
+
+/*
+ * get_irq (public)
+*/
+unsigned int mpc52xx_get_irq(void)
+{
+	u32 status;
+	int irq = NO_IRQ_IGNORE;
+
+	status = in_be32(&intr->enc_status);
+	if (status & 0x00000400) {	/* critical */
+		irq = (status >> 8) & 0x3;
+		if (irq == 2)	/* high priority peripheral */
+			goto peripheral;
+		irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET) &
+		       MPC52xx_IRQ_L1_MASK;
+	} else if (status & 0x00200000) {	/* main */
+		irq = (status >> 16) & 0x1f;
+		if (irq == 4)	/* low priority peripheral */
+			goto peripheral;
+		irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET) &
+		       MPC52xx_IRQ_L1_MASK;
+	} else if (status & 0x20000000) {	/* peripheral */
+	      peripheral:
+		irq = (status >> 24) & 0x1f;
+		if (irq == 0) {	/* bestcomm */
+			status = in_be32(&sdma->IntPend);
+			irq = ffs(status) - 1;
+			irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET) &
+			       MPC52xx_IRQ_L1_MASK;
+		} else {
+			irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET) &
+			       MPC52xx_IRQ_L1_MASK;
+		}
+	}
+
+	pr_debug("%s: irq=%x. virq=%d\n", __func__, irq,
+		 irq_linear_revmap(mpc52xx_irqhost, irq));
+
+	return irq_linear_revmap(mpc52xx_irqhost, irq);
+}
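The PIC encodes each hardware interrupt as (L1 << 6) | L2, which is what the xlate hook above builds and what every mask/unmask handler decomposes again. A quick standalone check of that packing, using the mask and offset values from mpc52xx_pic.h (the sample L1/L2 pair is arbitrary):

#include <stdio.h>

#define MPC52xx_IRQ_L1_OFFSET	6
#define MPC52xx_IRQ_L1_MASK	0x00c0
#define MPC52xx_IRQ_L2_OFFSET	0
#define MPC52xx_IRQ_L2_MASK	0x003f

int main(void)
{
	int l1 = 2, l2 = 9;	/* e.g. peripheral bank (L1=2), line 9 */
	int hwirq = ((l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK)
		  | ((l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK);

	printf("hwirq = 0x%02x\n", hwirq);	/* 0x89 */
	printf("l1 = %d, l2 = %d\n",
	       (hwirq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET,
	       (hwirq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET);
	return 0;
}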
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.h b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
new file mode 100644
index 000000000000..1a26bcdb3049
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
@@ -0,0 +1,53 @@
+/*
+ * Header file for Freescale MPC52xx Interrupt controller
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __POWERPC_SYSDEV_MPC52xx_PIC_H__
+#define __POWERPC_SYSDEV_MPC52xx_PIC_H__
+
+#include <asm/types.h>
+
+
+/* HW IRQ mapping */
+#define MPC52xx_IRQ_L1_CRIT	(0)
+#define MPC52xx_IRQ_L1_MAIN	(1)
+#define MPC52xx_IRQ_L1_PERP	(2)
+#define MPC52xx_IRQ_L1_SDMA	(3)
+
+#define MPC52xx_IRQ_L1_OFFSET	(6)
+#define MPC52xx_IRQ_L1_MASK	(0x00c0)
+
+#define MPC52xx_IRQ_L2_OFFSET	(0)
+#define MPC52xx_IRQ_L2_MASK	(0x003f)
+
+#define MPC52xx_IRQ_HIGHTESTHWIRQ	(0xd0)
+
+
+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+	u32 per_mask;		/* INTR + 0x00 */
+	u32 per_pri1;		/* INTR + 0x04 */
+	u32 per_pri2;		/* INTR + 0x08 */
+	u32 per_pri3;		/* INTR + 0x0c */
+	u32 ctrl;		/* INTR + 0x10 */
+	u32 main_mask;		/* INTR + 0x14 */
+	u32 main_pri1;		/* INTR + 0x18 */
+	u32 main_pri2;		/* INTR + 0x1c */
+	u32 reserved1;		/* INTR + 0x20 */
+	u32 enc_status;		/* INTR + 0x24 */
+	u32 crit_status;	/* INTR + 0x28 */
+	u32 main_status;	/* INTR + 0x2c */
+	u32 per_status;		/* INTR + 0x30 */
+	u32 reserved2;		/* INTR + 0x34 */
+	u32 per_error;		/* INTR + 0x38 */
+};
+
+#endif /* __POWERPC_SYSDEV_MPC52xx_PIC_H__ */
+
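The "INTR + 0xNN" offset comments in struct mpc52xx_intr can be sanity-checked with offsetof. A host-side probe of the same layout (assumes a compiler where u32 maps to uint32_t, as on the kernel's ppc32 builds):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint32_t u32;

struct mpc52xx_intr {
	u32 per_mask;	/* INTR + 0x00 */
	u32 per_pri1;	/* INTR + 0x04 */
	u32 per_pri2;	/* INTR + 0x08 */
	u32 per_pri3;	/* INTR + 0x0c */
	u32 ctrl;	/* INTR + 0x10 */
	u32 main_mask;	/* INTR + 0x14 */
	u32 main_pri1;	/* INTR + 0x18 */
	u32 main_pri2;	/* INTR + 0x1c */
	u32 reserved1;	/* INTR + 0x20 */
	u32 enc_status;	/* INTR + 0x24 */
};

int main(void)
{
	/* consecutive u32 members land 4 bytes apart, matching the comments */
	printf("ctrl at 0x%zx, enc_status at 0x%zx\n",
	       offsetof(struct mpc52xx_intr, ctrl),
	       offsetof(struct mpc52xx_intr, enc_status));	/* 0x10, 0x24 */
	return 0;
}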
diff --git a/arch/powerpc/platforms/82xx/mpc82xx_ads.c b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
index bb9acbb98176..ea880f1f0dcd 100644
--- a/arch/powerpc/platforms/82xx/mpc82xx_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
@@ -515,16 +515,6 @@ static int m82xx_pci_exclude_device(u_char bus, u_char devfn)
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static void
-__init mpc82xx_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev) {
-		pci_read_irq_line(dev);
-	}
-}
-
 void __init add_bridge(struct device_node *np)
 {
 	int len;
@@ -597,9 +587,6 @@ static void __init mpc82xx_ads_setup_arch(void)
 		add_bridge(np);
 
 	of_node_put(np);
-	ppc_md.pci_map_irq = NULL;
-	ppc_md.pcibios_fixup = mpc82xx_pcibios_fixup;
-	ppc_md.pcibios_fixup_bus = NULL;
 #endif
 
 #ifdef CONFIG_ROOT_NFS
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 7edb6b461382..edcd5b875b66 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -21,7 +21,7 @@ config MPC834x_SYS
 	  Be aware that PCI buses can only function when SYS board is plugged
 	  into the PIB (Platform IO Board) board from Freescale which provide
 	  3 PCI slots. The PIBs PCI initialization is the bootloader's
-	  responsiblilty.
+	  responsibility.
 
 config MPC834x_ITX
 	bool "Freescale MPC834x ITX"
@@ -30,7 +30,7 @@ config MPC834x_ITX
 	  This option enables support for the MPC 834x ITX evaluation board.
 
 	  Be aware that PCI initialization is the bootloader's
-	  responsiblilty.
+	  responsibility.
 
 config MPC8360E_PB
 	bool "Freescale MPC8360E PB"
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index a43ac71ab740..f58c9780b66f 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -97,8 +97,6 @@ static void __init mpc832x_sys_setup_arch(void)
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
 		add_bridge(np);
-
-	ppc_md.pci_swizzle = common_swizzle;
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index e2bcaaf6b329..314c42ac6048 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -118,7 +118,4 @@ define_machine(mpc834x_itx) {
 	.time_init	= mpc83xx_time_init,
 	.calibrate_decr	= generic_calibrate_decr,
 	.progress	= udbg_progress,
-#ifdef CONFIG_PCI
-	.pcibios_fixup	= mpc83xx_pcibios_fixup,
-#endif
 };
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c
index 677196187a4e..80b735a414d9 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_sys.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_sys.c
@@ -137,7 +137,4 @@ define_machine(mpc834x_sys) {
 	.time_init	= mpc83xx_time_init,
 	.calibrate_decr	= generic_calibrate_decr,
 	.progress	= udbg_progress,
-#ifdef CONFIG_PCI
-	.pcibios_fixup	= mpc83xx_pcibios_fixup,
-#endif
 };
diff --git a/arch/powerpc/platforms/83xx/mpc8360e_pb.c b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
index 1a523c81c06e..7bfd47ad7233 100644
--- a/arch/powerpc/platforms/83xx/mpc8360e_pb.c
+++ b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
@@ -102,8 +102,6 @@ static void __init mpc8360_sys_setup_arch(void)
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
 		add_bridge(np);
-
-	ppc_md.pci_swizzle = common_swizzle;
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 2c82bca9bfbb..01cae106912b 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -11,7 +11,6 @@
 
 extern int add_bridge(struct device_node *dev);
 extern int mpc83xx_exclude_device(u_char bus, u_char devfn);
-extern void mpc83xx_pcibios_fixup(void);
 extern void mpc83xx_restart(char *cmd);
 extern long mpc83xx_time_init(void);
 
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
index 4557ac5255c1..9c3650555144 100644
--- a/arch/powerpc/platforms/83xx/pci.c
+++ b/arch/powerpc/platforms/83xx/pci.c
@@ -45,15 +45,6 @@ int mpc83xx_exclude_device(u_char bus, u_char devfn)
 	return PCIBIOS_SUCCESSFUL;
 }
 
-void __init mpc83xx_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	/* map all the PCI irqs */
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
-
 int __init add_bridge(struct device_node *dev)
 {
 	int len;
diff --git a/arch/powerpc/platforms/85xx/misc.c b/arch/powerpc/platforms/85xx/misc.c
index 26c5e822c7c8..3e62fcb04c1c 100644
--- a/arch/powerpc/platforms/85xx/misc.c
+++ b/arch/powerpc/platforms/85xx/misc.c
@@ -21,11 +21,3 @@ void mpc85xx_restart(char *cmd)
 	local_irq_disable();
 	abort();
 }
-
-/* For now this is a pass through */
-phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
-{
-	return addr;
-};
-
-EXPORT_SYMBOL(fixup_bigphys_addr);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index d3e669d69c73..bda2e55e6c4c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -53,15 +53,6 @@ mpc85xx_exclude_device(u_char bus, u_char devfn)
 	else
 		return PCIBIOS_SUCCESSFUL;
 }
-
-void __init
-mpc85xx_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_CPM2
@@ -253,8 +244,6 @@ static void __init mpc85xx_ads_setup_arch(void)
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
 		add_bridge(np);
-
-	ppc_md.pcibios_fixup = mpc85xx_pcibios_fixup;
 	ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
 
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 1a1c226ad4d9..f4dd5f2f8a28 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -398,15 +398,6 @@ mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
 }
 
 
-void __init mpc86xx_hpcn_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
-
-
 /*
  * Called very early, device-tree isn't unflattened
  */
@@ -461,7 +452,6 @@ define_machine(mpc86xx_hpcn) {
 	.setup_arch		= mpc86xx_hpcn_setup_arch,
 	.init_IRQ		= mpc86xx_hpcn_init_irq,
 	.show_cpuinfo		= mpc86xx_hpcn_show_cpuinfo,
-	.pcibios_fixup		= mpc86xx_hpcn_pcibios_fixup,
 	.get_irq		= mpic_get_irq,
 	.restart		= mpc86xx_restart,
 	.time_init		= mpc86xx_time_init,
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index e58fa953a50b..507d1b98f270 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -5,6 +5,7 @@ ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_PPC_PMAC)		+= powermac/
 endif
 endif
+obj-$(CONFIG_PPC_MPC52xx)	+= 52xx/
 obj-$(CONFIG_PPC_CHRP)		+= chrp/
 obj-$(CONFIG_4xx)		+= 4xx/
 obj-$(CONFIG_PPC_83xx)		+= 83xx/
@@ -13,6 +14,7 @@ obj-$(CONFIG_PPC_86xx)	+= 86xx/
 obj-$(CONFIG_PPC_PSERIES)	+= pseries/
 obj-$(CONFIG_PPC_ISERIES)	+= iseries/
 obj-$(CONFIG_PPC_MAPLE)		+= maple/
 obj-$(CONFIG_PPC_PASEMI)	+= pasemi/
 obj-$(CONFIG_PPC_CELL)		+= cell/
+obj-$(CONFIG_PPC_PS3)		+= ps3/
 obj-$(CONFIG_EMBEDDED6xx)	+= embedded6xx/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 3e430b489bb7..06a85b704331 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -20,4 +20,18 @@ config CBE_RAS
 	bool "RAS features for bare metal Cell BE"
 	default y
 
+config CBE_THERM
+	tristate "CBE thermal support"
+	default m
+	depends on CBE_RAS
+
+config CBE_CPUFREQ
+	tristate "CBE frequency scaling"
+	depends on CBE_RAS && CPU_FREQ
+	default m
+	help
+	  This adds the cpufreq driver for Cell BE processors.
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+	  If you don't have such a processor, say N.
+
 endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index c89cdd67383b..f90e8337796c 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,11 @@
 obj-$(CONFIG_PPC_CELL_NATIVE)	+= interrupt.o iommu.o setup.o \
-					cbe_regs.o spider-pic.o pervasive.o
+					cbe_regs.o spider-pic.o \
+					pervasive.o pmu.o io-workarounds.o
 obj-$(CONFIG_CBE_RAS)	+= ras.o
 
+obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
+obj-$(CONFIG_CBE_CPUFREQ) += cbe_cpufreq.o
+
 ifeq ($(CONFIG_SMP),y)
 obj-$(CONFIG_PPC_CELL_NATIVE)	+= smp.o
 endif
@@ -11,5 +15,6 @@ spufs-modular-$(CONFIG_SPU_FS) += spu_syscalls.o
 spu-priv1-$(CONFIG_PPC_CELL_NATIVE)	+= spu_priv1_mmio.o
 
 obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
+					   spu_coredump.o \
 					   $(spufs-modular-m) \
 					   $(spu-priv1-y) spufs/
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
new file mode 100644
index 000000000000..a3850fd1e94c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -0,0 +1,248 @@
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+
+#include "cbe_regs.h"
+
+static DEFINE_MUTEX(cbe_switch_mutex);
+
+
+/* the CBE supports an 8 step frequency scaling */
+static struct cpufreq_frequency_table cbe_freqs[] = {
+	{1,	0},
+	{2,	0},
+	{3,	0},
+	{4,	0},
+	{5,	0},
+	{6,	0},
+	{8,	0},
+	{10,	0},
+	{0,	CPUFREQ_TABLE_END},
+};
+
+/* to write to MIC register */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+	[0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* more values for the MIC */
+static u64 MIC_Slow_Next_Timer_table[] = {
+	0x0000240000000000ull,
+	0x0000268000000000ull,
+	0x000029C000000000ull,
+	0x00002D0000000000ull,
+	0x0000300000000000ull,
+	0x0000334000000000ull,
+	0x000039C000000000ull,
+	0x00003FC000000000ull,
+};
+
+/*
+ * hardware specific functions
+ */
+
+static int get_pmode(int cpu)
+{
+	int ret;
+	struct cbe_pmd_regs __iomem *pmd_regs;
+
+	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+	ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+	return ret;
+}
+
+static int set_pmode(int cpu, unsigned int pmode)
+{
+	struct cbe_pmd_regs __iomem *pmd_regs;
+	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+	u64 flags;
+	u64 value;
+
+	local_irq_save(flags);
+
+	mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+	pr_debug("pm register is mapped at %p\n", &pmd_regs->pmcr);
+	pr_debug("mic register is mapped at %p\n", &mic_tm_regs->slow_fast_timer_0);
+
+	out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+	out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+	out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+	out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+	value = in_be64(&pmd_regs->pmcr);
+	/* set bits to zero */
+	value &= 0xFFFFFFFFFFFFFFF8ull;
+	/* set bits to next pmode */
+	value |= pmode;
+
+	out_be64(&pmd_regs->pmcr, value);
+
+	/* wait until new pmode appears in status register */
+	value = in_be64(&pmd_regs->pmsr) & 0x07;
+	while (value != pmode) {
+		cpu_relax();
+		value = in_be64(&pmd_regs->pmsr) & 0x07;
+	}
+
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	u32 *max_freq;
+	int i, cur_pmode;
+	struct device_node *cpu;
+
+	cpu = of_get_cpu_node(policy->cpu, NULL);
+
+	if (!cpu)
+		return -ENODEV;
+
+	pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+	max_freq = (u32 *) get_property(cpu, "clock-frequency", NULL);
+
+	if (!max_freq)
+		return -EINVAL;
+
+	/* we need the freq in kHz */
+	*max_freq /= 1000;
+
+	pr_debug("max clock-frequency is at %u kHz\n", *max_freq);
+	pr_debug("initializing frequency table\n");
+
+	/* initialize frequency table */
+	for (i = 0; cbe_freqs[i].frequency != CPUFREQ_TABLE_END; i++) {
+		cbe_freqs[i].frequency = *max_freq / cbe_freqs[i].index;
+		pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
+	}
+
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	/* if DEBUG is enabled set_pmode() measures the correct latency of a transition */
+	policy->cpuinfo.transition_latency = 25000;
+
+	cur_pmode = get_pmode(policy->cpu);
+	pr_debug("current pmode is at %d\n", cur_pmode);
+
+	policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+	policy->cpus = cpu_sibling_map[policy->cpu];
+#endif
+
+	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
+
+	/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
+	return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	return 0;
+}
+
+static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, cbe_freqs);
+}
+
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq,
+			      unsigned int relation)
+{
+	int rc;
+	struct cpufreq_freqs freqs;
+	int cbe_pmode_new;
+
+	cpufreq_frequency_table_target(policy,
+				       cbe_freqs,
+				       target_freq,
+				       relation,
+				       &cbe_pmode_new);
+
+	freqs.old = policy->cur;
+	freqs.new = cbe_freqs[cbe_pmode_new].frequency;
+	freqs.cpu = policy->cpu;
+
+	mutex_lock(&cbe_switch_mutex);
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+		 policy->cpu,
+		 cbe_freqs[cbe_pmode_new].frequency,
+		 cbe_freqs[cbe_pmode_new].index);
+
+	rc = set_pmode(policy->cpu, cbe_pmode_new);
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	mutex_unlock(&cbe_switch_mutex);
+
+	return rc;
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+	.verify		= cbe_cpufreq_verify,
+	.target		= cbe_cpufreq_target,
+	.init		= cbe_cpufreq_cpu_init,
+	.exit		= cbe_cpufreq_cpu_exit,
+	.name		= "cbe-cpufreq",
+	.owner		= THIS_MODULE,
+	.flags		= CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+	return cpufreq_register_driver(&cbe_cpufreq_driver);
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&cbe_cpufreq_driver);
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
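The pmode-to-frequency relation that cbe_cpufreq_cpu_init() builds is simply the part's maximum clock divided by a per-slot divisor (1, 2, 3, 4, 5, 6, 8, 10 — the index column of cbe_freqs). A quick standalone version of that table fill (the 3.2 GHz maximum is an invented example, not a claim about any particular part):

#include <stdio.h>

int main(void)
{
	const unsigned int divisor[] = { 1, 2, 3, 4, 5, 6, 8, 10 };
	unsigned int max_khz = 3200000;	/* hypothetical 3.2 GHz max, in kHz */
	int i;

	for (i = 0; i < 8; i++)	/* one entry per pmode, as in the driver */
		printf("pmode %d: %u kHz\n", i, max_khz / divisor[i]);
	return 0;
}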
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 2f194ba29899..9a0ee62691d5 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/percpu.h> 9#include <linux/percpu.h>
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/module.h>
11 12
12#include <asm/io.h> 13#include <asm/io.h>
13#include <asm/pgtable.h> 14#include <asm/pgtable.h>
@@ -16,8 +17,6 @@
16 17
17#include "cbe_regs.h" 18#include "cbe_regs.h"
18 19
19#define MAX_CBE 2
20
21/* 20/*
22 * Current implementation uses "cpu" nodes. We build our own mapping 21 * Current implementation uses "cpu" nodes. We build our own mapping
23 * array of cpu numbers to cpu nodes locally for now to allow interrupt 22 * array of cpu numbers to cpu nodes locally for now to allow interrupt
@@ -30,6 +29,8 @@ static struct cbe_regs_map
30 struct device_node *cpu_node; 29 struct device_node *cpu_node;
31 struct cbe_pmd_regs __iomem *pmd_regs; 30 struct cbe_pmd_regs __iomem *pmd_regs;
32 struct cbe_iic_regs __iomem *iic_regs; 31 struct cbe_iic_regs __iomem *iic_regs;
32 struct cbe_mic_tm_regs __iomem *mic_tm_regs;
33 struct cbe_pmd_shadow_regs pmd_shadow_regs;
33} cbe_regs_maps[MAX_CBE]; 34} cbe_regs_maps[MAX_CBE];
34static int cbe_regs_map_count; 35static int cbe_regs_map_count;
35 36
@@ -42,6 +43,19 @@ static struct cbe_thread_map
42static struct cbe_regs_map *cbe_find_map(struct device_node *np) 43static struct cbe_regs_map *cbe_find_map(struct device_node *np)
43{ 44{
44 int i; 45 int i;
46 struct device_node *tmp_np;
47
48 if (strcasecmp(np->type, "spe") == 0) {
49 if (np->data == NULL) {
50 /* walk up path until cpu node was found */
51 tmp_np = np->parent;
52 while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
53 tmp_np = tmp_np->parent;
54
55 np->data = cbe_find_map(tmp_np);
56 }
57 return np->data;
58 }
45 59
46 for (i = 0; i < cbe_regs_map_count; i++) 60 for (i = 0; i < cbe_regs_map_count; i++)
47 if (cbe_regs_maps[i].cpu_node == np) 61 if (cbe_regs_maps[i].cpu_node == np)
@@ -56,6 +70,7 @@ struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
56 return NULL; 70 return NULL;
57 return map->pmd_regs; 71 return map->pmd_regs;
58} 72}
73EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
59 74
60struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) 75struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
61{ 76{
@@ -64,7 +79,23 @@ struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
64 return NULL; 79 return NULL;
65 return map->pmd_regs; 80 return map->pmd_regs;
66} 81}
82EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
67 83
84struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
85{
86 struct cbe_regs_map *map = cbe_find_map(np);
87 if (map == NULL)
88 return NULL;
89 return &map->pmd_shadow_regs;
90}
91
92struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
93{
94 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
95 if (map == NULL)
96 return NULL;
97 return &map->pmd_shadow_regs;
98}
68 99
69struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np) 100struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
70{ 101{
@@ -73,6 +104,7 @@ struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
73 return NULL; 104 return NULL;
74 return map->iic_regs; 105 return map->iic_regs;
75} 106}
107
76struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) 108struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
77{ 109{
78 struct cbe_regs_map *map = cbe_thread_map[cpu].regs; 110 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
@@ -81,6 +113,36 @@ struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
81 return map->iic_regs; 113 return map->iic_regs;
82} 114}
83 115
116struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
117{
118 struct cbe_regs_map *map = cbe_find_map(np);
119 if (map == NULL)
120 return NULL;
121 return map->mic_tm_regs;
122}
123
124struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
125{
126 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
127 if (map == NULL)
128 return NULL;
129 return map->mic_tm_regs;
130}
131EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
132
133/* FIXME
134 * This is little more than a stub at the moment. It should be
135 * fleshed out so that it works for both SMT and non-SMT, no
136 * matter if the passed cpu is odd or even.
137 * With SMT enabled, returns 0 for even-numbered cpus and 1 for odd ones.
138 * With SMT disabled, returns 0 for all cpus.
139 */
140u32 cbe_get_hw_thread_id(int cpu)
141{
142 return (cpu & 1);
143}
144EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
145
84void __init cbe_regs_init(void) 146void __init cbe_regs_init(void)
85{ 147{
86 int i; 148 int i;
@@ -119,6 +181,11 @@ void __init cbe_regs_init(void)
119 prop = get_property(cpu, "iic", NULL); 181 prop = get_property(cpu, "iic", NULL);
120 if (prop != NULL) 182 if (prop != NULL)
121 map->iic_regs = ioremap(prop->address, prop->len); 183 map->iic_regs = ioremap(prop->address, prop->len);
184
185 prop = (struct address_prop *)get_property(cpu, "mic-tm",
186 NULL);
187 if (prop != NULL)
188 map->mic_tm_regs = ioremap(prop->address, prop->len);
122 } 189 }
123} 190}
124 191
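Annotation: a minimal sketch of how the accessors exported above might be used; example_read_pmsr() is hypothetical and relies on the cbe_pmd_regs layout from cbe_regs.h:

static u64 example_read_pmsr(int cpu)
{
	struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);

	if (regs == NULL)	/* cpu not mapped to a CBE */
		return 0;
	return in_be64(&regs->pmsr);	/* power management status */
}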
diff --git a/arch/powerpc/platforms/cell/cbe_regs.h b/arch/powerpc/platforms/cell/cbe_regs.h
index e76e4a6af5bc..440a7ecc66ea 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.h
+++ b/arch/powerpc/platforms/cell/cbe_regs.h
@@ -4,12 +4,19 @@
4 * This file is intended to hold the various register definitions for CBE 4 * This file is intended to hold the various register definitions for CBE
5 * on-chip system devices (memory controller, IO controller, etc...) 5 * on-chip system devices (memory controller, IO controller, etc...)
6 * 6 *
7 * (C) Copyright IBM Corporation 2001,2006
8 *
9 * Authors: Maximino Aguilar (maguilar@us.ibm.com)
10 * David J. Erb (djerb@us.ibm.com)
11 *
7 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. 12 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
8 */ 13 */
9 14
10#ifndef CBE_REGS_H 15#ifndef CBE_REGS_H
11#define CBE_REGS_H 16#define CBE_REGS_H
12 17
18#include <asm/cell-pmu.h>
19
13/* 20/*
14 * 21 *
15 * Some HID register definitions 22 * Some HID register definitions
@@ -22,6 +29,7 @@
22#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul 29#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul
23#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul 30#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul
24 31
32#define MAX_CBE 2
25 33
26/* 34/*
27 * 35 *
@@ -29,51 +37,124 @@
29 * 37 *
30 */ 38 */
31 39
40union spe_reg {
41 u64 val;
42 u8 spe[8];
43};
44
45union ppe_spe_reg {
46 u64 val;
47 struct {
48 u32 ppe;
49 u32 spe;
50 };
51};
52
53
32struct cbe_pmd_regs { 54struct cbe_pmd_regs {
33 u8 pad_0x0000_0x0800[0x0800 - 0x0000]; /* 0x0000 */ 55 /* Debug Bus Control */
56 u64 pad_0x0000; /* 0x0000 */
57
58 u64 group_control; /* 0x0008 */
59
60 u8 pad_0x0010_0x00a8 [0x00a8 - 0x0010]; /* 0x0010 */
61
62 u64 debug_bus_control; /* 0x00a8 */
63
64 u8 pad_0x00b0_0x0100 [0x0100 - 0x00b0]; /* 0x00b0 */
65
66 u64 trace_aux_data; /* 0x0100 */
67 u64 trace_buffer_0_63; /* 0x0108 */
68 u64 trace_buffer_64_127; /* 0x0110 */
69 u64 trace_address; /* 0x0118 */
70 u64 ext_tr_timer; /* 0x0120 */
71
72 u8 pad_0x0128_0x0400 [0x0400 - 0x0128]; /* 0x0128 */
73
74 /* Performance Monitor */
75 u64 pm_status; /* 0x0400 */
76 u64 pm_control; /* 0x0408 */
77 u64 pm_interval; /* 0x0410 */
78 u64 pm_ctr[4]; /* 0x0418 */
79 u64 pm_start_stop; /* 0x0438 */
80 u64 pm07_control[8]; /* 0x0440 */
81
82 u8 pad_0x0480_0x0800 [0x0800 - 0x0480]; /* 0x0480 */
34 83
35 /* Thermal Sensor Registers */ 84 /* Thermal Sensor Registers */
36 u64 ts_ctsr1; /* 0x0800 */ 85 union spe_reg ts_ctsr1; /* 0x0800 */
37 u64 ts_ctsr2; /* 0x0808 */ 86 u64 ts_ctsr2; /* 0x0808 */
38 u64 ts_mtsr1; /* 0x0810 */ 87 union spe_reg ts_mtsr1; /* 0x0810 */
39 u64 ts_mtsr2; /* 0x0818 */ 88 u64 ts_mtsr2; /* 0x0818 */
40 u64 ts_itr1; /* 0x0820 */ 89 union spe_reg ts_itr1; /* 0x0820 */
41 u64 ts_itr2; /* 0x0828 */ 90 u64 ts_itr2; /* 0x0828 */
42 u64 ts_gitr; /* 0x0830 */ 91 u64 ts_gitr; /* 0x0830 */
43 u64 ts_isr; /* 0x0838 */ 92 u64 ts_isr; /* 0x0838 */
44 u64 ts_imr; /* 0x0840 */ 93 u64 ts_imr; /* 0x0840 */
45 u64 tm_cr1; /* 0x0848 */ 94 union spe_reg tm_cr1; /* 0x0848 */
46 u64 tm_cr2; /* 0x0850 */ 95 u64 tm_cr2; /* 0x0850 */
47 u64 tm_simr; /* 0x0858 */ 96 u64 tm_simr; /* 0x0858 */
48 u64 tm_tpr; /* 0x0860 */ 97 union ppe_spe_reg tm_tpr; /* 0x0860 */
49 u64 tm_str1; /* 0x0868 */ 98 union spe_reg tm_str1; /* 0x0868 */
50 u64 tm_str2; /* 0x0870 */ 99 u64 tm_str2; /* 0x0870 */
51 u64 tm_tsr; /* 0x0878 */ 100 union ppe_spe_reg tm_tsr; /* 0x0878 */
52 101
53 /* Power Management */ 102 /* Power Management */
54 u64 pm_control; /* 0x0880 */ 103 u64 pmcr; /* 0x0880 */
55#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000 104#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000
56 u64 pm_status; /* 0x0888 */ 105 u64 pmsr; /* 0x0888 */
57 106
58 /* Time Base Register */ 107 /* Time Base Register */
59 u64 tbr; /* 0x0890 */ 108 u64 tbr; /* 0x0890 */
60 109
61 u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */ 110 u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */
62 111
63 /* Fault Isolation Registers */ 112 /* Fault Isolation Registers */
64 u64 checkstop_fir; /* 0x0c00 */ 113 u64 checkstop_fir; /* 0x0c00 */
65 u64 recoverable_fir; 114 u64 recoverable_fir; /* 0x0c08 */
66 u64 spec_att_mchk_fir; 115 u64 spec_att_mchk_fir; /* 0x0c10 */
67 u64 fir_mode_reg; 116 u64 fir_mode_reg; /* 0x0c18 */
68 u64 fir_enable_mask; 117 u64 fir_enable_mask; /* 0x0c20 */
69 118
70 u8 pad_0x0c28_0x1000 [0x1000 - 0x0c28]; /* 0x0c28 */ 119 u8 pad_0x0c28_0x1000 [0x1000 - 0x0c28]; /* 0x0c28 */
71}; 120};
72 121
73extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np); 122extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
74extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu); 123extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
75 124
76/* 125/*
126 * PMU shadow registers
127 *
128 * Many of the registers in the performance monitoring unit are write-only,
129 * so we need to save a copy of what we write to those registers.
130 *
131 * The actual data counters are read/write. However, writing to the counters
132 * only takes effect if the PMU is enabled. Otherwise the value is stored in
133 * a hardware latch until the next time the PMU is enabled. So we save a copy
134 * of the counter values if we need to read them back while the PMU is
135 * disabled. The counter_value_in_latch field is a bitmap indicating which
136 * counters currently have a value waiting to be written.
137 */
138
139struct cbe_pmd_shadow_regs {
140 u32 group_control;
141 u32 debug_bus_control;
142 u32 trace_address;
143 u32 ext_tr_timer;
144 u32 pm_status;
145 u32 pm_control;
146 u32 pm_interval;
147 u32 pm_start_stop;
148 u32 pm07_control[NR_CTRS];
149
150 u32 pm_ctr[NR_PHYS_CTRS];
151 u32 counter_value_in_latch;
152};
153
154extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
155extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
156
157/*
77 * 158 *
78 * IIC unit register definitions 159 * IIC unit register definitions
79 * 160 *
@@ -102,18 +183,28 @@ struct cbe_iic_regs {
102 183
103 /* IIC interrupt registers */ 184 /* IIC interrupt registers */
104 struct cbe_iic_thread_regs thread[2]; /* 0x0400 */ 185 struct cbe_iic_thread_regs thread[2]; /* 0x0400 */
105 u64 iic_ir; /* 0x0440 */ 186
106 u64 iic_is; /* 0x0448 */ 187 u64 iic_ir; /* 0x0440 */
188#define CBE_IIC_IR_PRIO(x) (((x) & 0xf) << 12)
189#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
190#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
191#define CBE_IIC_IR_IOC_0 0x0
192#define CBE_IIC_IR_IOC_1S 0xb
193#define CBE_IIC_IR_PT_0 0xe
194#define CBE_IIC_IR_PT_1 0xf
195
196 u64 iic_is; /* 0x0448 */
197#define CBE_IIC_IS_PMI 0x2
107 198
108 u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */ 199 u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */
109 200
110 /* IOC FIR */ 201 /* IOC FIR */
111 u64 ioc_fir_reset; /* 0x0500 */ 202 u64 ioc_fir_reset; /* 0x0500 */
112 u64 ioc_fir_set; 203 u64 ioc_fir_set; /* 0x0508 */
113 u64 ioc_checkstop_enable; 204 u64 ioc_checkstop_enable; /* 0x0510 */
114 u64 ioc_fir_error_mask; 205 u64 ioc_fir_error_mask; /* 0x0518 */
115 u64 ioc_syserr_enable; 206 u64 ioc_syserr_enable; /* 0x0520 */
116 u64 ioc_fir; 207 u64 ioc_fir; /* 0x0528 */
117 208
118 u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */ 209 u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */
119}; 210};
@@ -122,6 +213,48 @@ extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
122extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu); 213extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
123 214
124 215
216struct cbe_mic_tm_regs {
217 u8 pad_0x0000_0x0040[0x0040 - 0x0000]; /* 0x0000 */
218
219 u64 mic_ctl_cnfg2; /* 0x0040 */
220#define CBE_MIC_ENABLE_AUX_TRC 0x8000000000000000LL
221#define CBE_MIC_DISABLE_PWR_SAV_2 0x0200000000000000LL
222#define CBE_MIC_DISABLE_AUX_TRC_WRAP 0x0100000000000000LL
223#define CBE_MIC_ENABLE_AUX_TRC_INT 0x0080000000000000LL
224
225 u64 pad_0x0048; /* 0x0048 */
226
227 u64 mic_aux_trc_base; /* 0x0050 */
228 u64 mic_aux_trc_max_addr; /* 0x0058 */
229 u64 mic_aux_trc_cur_addr; /* 0x0060 */
230 u64 mic_aux_trc_grf_addr; /* 0x0068 */
231 u64 mic_aux_trc_grf_data; /* 0x0070 */
232
233 u64 pad_0x0078; /* 0x0078 */
234
235 u64 mic_ctl_cnfg_0; /* 0x0080 */
236#define CBE_MIC_DISABLE_PWR_SAV_0 0x8000000000000000LL
237
238 u64 pad_0x0088; /* 0x0088 */
239
240 u64 slow_fast_timer_0; /* 0x0090 */
241 u64 slow_next_timer_0; /* 0x0098 */
242
243 u8 pad_0x00a0_0x01c0[0x01c0 - 0x0a0]; /* 0x00a0 */
244
245 u64 mic_ctl_cnfg_1; /* 0x01c0 */
246#define CBE_MIC_DISABLE_PWR_SAV_1 0x8000000000000000LL
247 u64 pad_0x01c8; /* 0x01c8 */
248
249 u64 slow_fast_timer_1; /* 0x01d0 */
250 u64 slow_next_timer_1; /* 0x01d8 */
251
252 u8 pad_0x01e0_0x1000[0x1000 - 0x01e0]; /* 0x01e0 */
253};
254
255extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
256extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
257
125/* Init this module early */ 258/* Init this module early */
126extern void cbe_regs_init(void); 259extern void cbe_regs_init(void);
127 260
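Annotation: the shadow-register comment above implies a write path that mirrors every store into the shadow copy. A sketch of that pattern; both helpers are hypothetical, and writing the 32-bit value unmodified to the 64-bit MMIO register is an assumption about the register layout:

static void example_write_pm_interval(int cpu, u32 val)
{
	struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
	struct cbe_pmd_shadow_regs *shadow = cbe_get_cpu_pmd_shadow_regs(cpu);

	shadow->pm_interval = val;	/* keep a readable copy... */
	out_be64(&regs->pm_interval, val);	/* ...the MMIO copy is write-only */
}

static u32 example_read_pm_interval(int cpu)
{
	/* the hardware register cannot be read back, so use the shadow */
	return cbe_get_cpu_pmd_shadow_regs(cpu)->pm_interval;
}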
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
new file mode 100644
index 000000000000..70e0d968d30f
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -0,0 +1,228 @@
1/*
2 * thermal support for the cell processor
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Christian Krafft <krafft@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/module.h>
24#include <linux/sysdev.h>
25#include <linux/kernel.h>
26#include <linux/cpu.h>
27#include <asm/spu.h>
28#include <asm/io.h>
29#include <asm/prom.h>
30
31#include "cbe_regs.h"
32#include "spu_priv1_mmio.h"
33
34static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
35{
36 struct spu *spu;
37
38 spu = container_of(sysdev, struct spu, sysdev);
39
40 return cbe_get_pmd_regs(spu_devnode(spu));
41}
42
43/* returns the value for a given spu in a given register */
44static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg)
45{
46 unsigned int *id;
47 union spe_reg value;
48 struct spu *spu;
49
50	/* getting the id from the "reg" property will not work on future device-tree layouts;
51	 * in future we should store the id in the spu struct and use it here */
52 spu = container_of(sysdev, struct spu, sysdev);
53 id = (unsigned int *)get_property(spu_devnode(spu), "reg", NULL);
54 value.val = in_be64(&reg->val);
55
56 return value.spe[*id];
57}
58
59static ssize_t spu_show_temp(struct sys_device *sysdev, char *buf)
60{
61 int value;
62 struct cbe_pmd_regs __iomem *pmd_regs;
63
64 pmd_regs = get_pmd_regs(sysdev);
65
66 value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
67 /* clear all other bits */
68 value &= 0x3F;
69 /* temp is stored in steps of 2 degrees */
70 value *= 2;
71 /* base temp is 65 degrees */
72 value += 65;
73
74 return sprintf(buf, "%d\n", (int) value);
75}
76
77static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
78{
79 struct cbe_pmd_regs __iomem *pmd_regs;
80 u64 value;
81
82 pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
83 value = in_be64(&pmd_regs->ts_ctsr2);
84
85	/* shift down to this sensor's field */
86 value >>= pos;
87 /* clear all other bits */
88 value &= 0x3F;
89 /* temp is stored in steps of 2 degrees */
90 value *= 2;
91 /* base temp is 65 degrees */
92 value += 65;
93
94 return sprintf(buf, "%d\n", (int) value);
95}
96
97
98/* shows the temperature of the DTS on the PPE,
99 * located near the linear thermal sensor */
100static ssize_t ppe_show_temp0(struct sys_device *sysdev, char *buf)
101{
102 return ppe_show_temp(sysdev, buf, 32);
103}
104
105/* shows the temperature of the second DTS on the PPE */
106static ssize_t ppe_show_temp1(struct sys_device *sysdev, char *buf)
107{
108 return ppe_show_temp(sysdev, buf, 0);
109}
110
111static struct sysdev_attribute attr_spu_temperature = {
112 .attr = {.name = "temperature", .mode = 0400 },
113 .show = spu_show_temp,
114};
115
116static struct attribute *spu_attributes[] = {
117 &attr_spu_temperature.attr,
118 NULL,
119};
120
121static struct attribute_group spu_attribute_group = {
122 .name = "thermal",
123 .attrs = spu_attributes,
124};
125
126static struct sysdev_attribute attr_ppe_temperature0 = {
127 .attr = {.name = "temperature0", .mode = 0400 },
128 .show = ppe_show_temp0,
129};
130
131static struct sysdev_attribute attr_ppe_temperature1 = {
132 .attr = {.name = "temperature1", .mode = 0400 },
133 .show = ppe_show_temp1,
134};
135
136static struct attribute *ppe_attributes[] = {
137 &attr_ppe_temperature0.attr,
138 &attr_ppe_temperature1.attr,
139 NULL,
140};
141
142static struct attribute_group ppe_attribute_group = {
143 .name = "thermal",
144 .attrs = ppe_attributes,
145};
146
147/*
148 * initialize throttling with default values
149 */
150static void __init init_default_values(void)
151{
152 int cpu;
153 struct cbe_pmd_regs __iomem *pmd_regs;
154 struct sys_device *sysdev;
155 union ppe_spe_reg tpr;
156 union spe_reg str1;
157 u64 str2;
158 union spe_reg cr1;
159 u64 cr2;
160
161 /* TPR defaults */
162 /* ppe
163 * 1F - no full stop
164 * 08 - dynamic throttling starts if over 80 degrees
165 * 03 - dynamic throttling ceases if below 70 degrees */
166 tpr.ppe = 0x1F0803;
167 /* spe
168	 * 10 - full stop when over 96 degrees
169 * 08 - dynamic throttling starts if over 80 degrees
170 * 03 - dynamic throttling ceases if below 70 degrees
171 */
172 tpr.spe = 0x100803;
173
174 /* STR defaults */
175 /* str1
176 * 10 - stop 16 of 32 cycles
177 */
178 str1.val = 0x1010101010101010ull;
179 /* str2
180 * 10 - stop 16 of 32 cycles
181 */
182 str2 = 0x10;
183
184 /* CR defaults */
185 /* cr1
186 * 4 - normal operation
187 */
188 cr1.val = 0x0404040404040404ull;
189 /* cr2
190 * 4 - normal operation
191 */
192 cr2 = 0x04;
193
194 for_each_possible_cpu (cpu) {
195 pr_debug("processing cpu %d\n", cpu);
196 sysdev = get_cpu_sysdev(cpu);
197 pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
198
199 out_be64(&pmd_regs->tm_str2, str2);
200 out_be64(&pmd_regs->tm_str1.val, str1.val);
201 out_be64(&pmd_regs->tm_tpr.val, tpr.val);
202 out_be64(&pmd_regs->tm_cr1.val, cr1.val);
203 out_be64(&pmd_regs->tm_cr2, cr2);
204 }
205}
206
207
208static int __init thermal_init(void)
209{
210 init_default_values();
211
212 spu_add_sysdev_attr_group(&spu_attribute_group);
213 cpu_add_sysdev_attr_group(&ppe_attribute_group);
214
215 return 0;
216}
217module_init(thermal_init);
218
219static void __exit thermal_exit(void)
220{
221 spu_remove_sysdev_attr_group(&spu_attribute_group);
222 cpu_remove_sysdev_attr_group(&ppe_attribute_group);
223}
224module_exit(thermal_exit);
225
226MODULE_LICENSE("GPL");
227MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
228
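Annotation: both show routines above apply the same decode to the raw 6-bit DTS reading; factored out as a sketch with a hypothetical helper name:

/* raw DTS value -> degrees Celsius: 2-degree steps from a 65-degree
 * base, exactly as in spu_show_temp() and ppe_show_temp() */
static inline int cbe_dts_to_celsius(u64 raw)
{
	return (raw & 0x3f) * 2 + 65;
}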
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a914c12b4060..6666d037eb44 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -396,3 +396,19 @@ void __init iic_init_IRQ(void)
396 /* Enable on current CPU */ 396 /* Enable on current CPU */
397 iic_setup_cpu(); 397 iic_setup_cpu();
398} 398}
399
400void iic_set_interrupt_routing(int cpu, int thread, int priority)
401{
402 struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
403 u64 iic_ir = 0;
404 int node = cpu >> 1;
405
406 /* Set which node and thread will handle the next interrupt */
407 iic_ir |= CBE_IIC_IR_PRIO(priority) |
408 CBE_IIC_IR_DEST_NODE(node);
409 if (thread == 0)
410 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
411 else
412 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
413 out_be64(&iic_regs->iic_ir, iic_ir);
414}
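Annotation: a usage sketch tying the new routing helper to cbe_get_hw_thread_id() from cbe_regs.c; the wrapper is hypothetical and priority 0 is an arbitrary illustrative value:

static void example_route_to_self(int cpu)
{
	/* deliver the next interrupt to this cpu's own hardware thread */
	iic_set_interrupt_routing(cpu, cbe_get_hw_thread_id(cpu), 0);
}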
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 9ba1d3c17b4b..942dc39d6045 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -83,5 +83,7 @@ extern u8 iic_get_target_id(int cpu);
83 83
84extern void spider_init_IRQ(void); 84extern void spider_init_IRQ(void);
85 85
86extern void iic_set_interrupt_routing(int cpu, int thread, int priority);
87
86#endif 88#endif
87#endif /* ASM_CELL_PIC_H */ 89#endif /* ASM_CELL_PIC_H */
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
new file mode 100644
index 000000000000..7c73128305ec
--- /dev/null
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -0,0 +1,346 @@
1/*
2 * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
3 * IBM, Corp.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#undef DEBUG
10
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/pci.h>
14#include <asm/io.h>
15#include <asm/machdep.h>
16#include <asm/pci-bridge.h>
17#include <asm/ppc-pci.h>
18
19
20#define SPIDER_PCI_REG_BASE 0xd000
21#define SPIDER_PCI_VCI_CNTL_STAT 0x0110
22#define SPIDER_PCI_DUMMY_READ 0x0810
23#define SPIDER_PCI_DUMMY_READ_BASE 0x0814
24
25/* Undefine that to re-enable bogus prefetch
26 *
27 * Without that workaround, the chip will do bogus prefetch past
28 * page boundary from system memory. This setting will disable that,
29 * though the documentation is unclear as to the consequences of doing
30 * so, whether purely a performance cost or possible misbehaviour... It's not
31 * clear whether the chip can handle unaligned accesses at all without
32 * prefetching enabled.
33 *
34 * For now, things appear to behave properly with prefetching
35 * disabled and IDE in use, possibly because IDE isn't doing any
36 * unaligned accesses.
37 */
38#define SPIDER_DISABLE_PREFETCH
39
40#define MAX_SPIDERS 3
41
42static struct spider_pci_bus {
43 void __iomem *regs;
44 unsigned long mmio_start;
45 unsigned long mmio_end;
46 unsigned long pio_vstart;
47 unsigned long pio_vend;
48} spider_pci_busses[MAX_SPIDERS];
49static int spider_pci_count;
50
51static struct spider_pci_bus *spider_pci_find(unsigned long vaddr,
52 unsigned long paddr)
53{
54 int i;
55
56 for (i = 0; i < spider_pci_count; i++) {
57 struct spider_pci_bus *bus = &spider_pci_busses[i];
58 if (paddr && paddr >= bus->mmio_start && paddr < bus->mmio_end)
59 return bus;
60 if (vaddr && vaddr >= bus->pio_vstart && vaddr < bus->pio_vend)
61 return bus;
62 }
63 return NULL;
64}
65
66static void spider_io_flush(const volatile void __iomem *addr)
67{
68 struct spider_pci_bus *bus;
69 int token;
70
71 /* Get platform token (set by ioremap) from address */
72 token = PCI_GET_ADDR_TOKEN(addr);
73
74 /* Fast path if we have a non-0 token, it indicates which bus we
75 * are on.
76 *
77	 * If the token is 0, that means either the ioremap was done
78	 * before we initialized this layer, or it's a PIO operation. We
79	 * fall back to a slow path in this case. Hopefully, internal devices
80 * which are ioremap'ed early should use in_XX/out_XX functions
81 * instead of the PCI ones and thus not suffer from the slowdown.
82 *
83 * Also note that currently, the workaround will not work for areas
84 * that are not mapped with PTEs (bolted in the hash table). This
85 * is the case for ioremaps done very early at boot (before
86 * mem_init_done) and includes the mapping of the ISA IO space.
87 *
88 * Fortunately, none of the affected devices is expected to do DMA
89 * and thus there should be no problem in practice.
90 *
91	 * In order to improve performance, we only do the PTE search for
92 * addresses falling in the PHB IO space area. That means it will
93 * not work for hotplug'ed PHBs but those don't exist with Spider.
94 */
95 if (token && token <= spider_pci_count)
96 bus = &spider_pci_busses[token - 1];
97 else {
98 unsigned long vaddr, paddr;
99 pte_t *ptep;
100
101		/* Strip the token to get the real virtual address */
102 vaddr = (unsigned long)PCI_FIX_ADDR(addr);
103
104 /* Check if it's in allowed range for PIO */
105 if (vaddr < PHBS_IO_BASE || vaddr >= IMALLOC_BASE)
106 return;
107
108 /* Try to find a PTE. If not, clear the paddr, we'll do
109 * a vaddr only lookup (PIO only)
110 */
111 ptep = find_linux_pte(init_mm.pgd, vaddr);
112 if (ptep == NULL)
113 paddr = 0;
114 else
115 paddr = pte_pfn(*ptep) << PAGE_SHIFT;
116
117 bus = spider_pci_find(vaddr, paddr);
118 if (bus == NULL)
119 return;
120 }
121
122 /* Now do the workaround
123 */
124 (void)in_be32(bus->regs + SPIDER_PCI_DUMMY_READ);
125}
126
127static u8 spider_readb(const volatile void __iomem *addr)
128{
129 u8 val = __do_readb(addr);
130 spider_io_flush(addr);
131 return val;
132}
133
134static u16 spider_readw(const volatile void __iomem *addr)
135{
136 u16 val = __do_readw(addr);
137 spider_io_flush(addr);
138 return val;
139}
140
141static u32 spider_readl(const volatile void __iomem *addr)
142{
143 u32 val = __do_readl(addr);
144 spider_io_flush(addr);
145 return val;
146}
147
148static u64 spider_readq(const volatile void __iomem *addr)
149{
150 u64 val = __do_readq(addr);
151 spider_io_flush(addr);
152 return val;
153}
154
155static u16 spider_readw_be(const volatile void __iomem *addr)
156{
157 u16 val = __do_readw_be(addr);
158 spider_io_flush(addr);
159 return val;
160}
161
162static u32 spider_readl_be(const volatile void __iomem *addr)
163{
164 u32 val = __do_readl_be(addr);
165 spider_io_flush(addr);
166 return val;
167}
168
169static u64 spider_readq_be(const volatile void __iomem *addr)
170{
171 u64 val = __do_readq_be(addr);
172 spider_io_flush(addr);
173 return val;
174}
175
176static void spider_readsb(const volatile void __iomem *addr, void *buf,
177 unsigned long count)
178{
179 __do_readsb(addr, buf, count);
180 spider_io_flush(addr);
181}
182
183static void spider_readsw(const volatile void __iomem *addr, void *buf,
184 unsigned long count)
185{
186 __do_readsw(addr, buf, count);
187 spider_io_flush(addr);
188}
189
190static void spider_readsl(const volatile void __iomem *addr, void *buf,
191 unsigned long count)
192{
193 __do_readsl(addr, buf, count);
194 spider_io_flush(addr);
195}
196
197static void spider_memcpy_fromio(void *dest, const volatile void __iomem *src,
198 unsigned long n)
199{
200 __do_memcpy_fromio(dest, src, n);
201 spider_io_flush(src);
202}
203
204
205static void __iomem * spider_ioremap(unsigned long addr, unsigned long size,
206 unsigned long flags)
207{
208 struct spider_pci_bus *bus;
209 void __iomem *res = __ioremap(addr, size, flags);
210 int busno;
211
212 pr_debug("spider_ioremap(0x%lx, 0x%lx, 0x%lx) -> 0x%p\n",
213 addr, size, flags, res);
214
215 bus = spider_pci_find(0, addr);
216 if (bus != NULL) {
217 busno = bus - spider_pci_busses;
218 pr_debug(" found bus %d, setting token\n", busno);
219 PCI_SET_ADDR_TOKEN(res, busno + 1);
220 }
221 pr_debug(" result=0x%p\n", res);
222
223 return res;
224}
225
226static void __init spider_pci_setup_chip(struct spider_pci_bus *bus)
227{
228#ifdef SPIDER_DISABLE_PREFETCH
229 u32 val = in_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT);
230 pr_debug(" PVCI_Control_Status was 0x%08x\n", val);
231 out_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
232#endif
233
234 /* Configure the dummy address for the workaround */
235 out_be32(bus->regs + SPIDER_PCI_DUMMY_READ_BASE, 0x80000000);
236}
237
238static void __init spider_pci_add_one(struct pci_controller *phb)
239{
240 struct spider_pci_bus *bus = &spider_pci_busses[spider_pci_count];
241 struct device_node *np = phb->arch_data;
242 struct resource rsrc;
243 void __iomem *regs;
244
245 if (spider_pci_count >= MAX_SPIDERS) {
246 printk(KERN_ERR "Too many spider bridges, workarounds"
247 " disabled for %s\n", np->full_name);
248 return;
249 }
250
251 /* Get the registers for the beast */
252 if (of_address_to_resource(np, 0, &rsrc)) {
253 printk(KERN_ERR "Failed to get registers for spider %s"
254 " workarounds disabled\n", np->full_name);
255 return;
256 }
257
258 /* Mask out some useless bits in there to get to the base of the
259 * spider chip
260 */
261 rsrc.start &= ~0xfffffffful;
262
263 /* Map them */
264 regs = ioremap(rsrc.start + SPIDER_PCI_REG_BASE, 0x1000);
265 if (regs == NULL) {
266 printk(KERN_ERR "Failed to map registers for spider %s"
267 " workarounds disabled\n", np->full_name);
268 return;
269 }
270
271 spider_pci_count++;
272
273 /* We assume spiders only have one MMIO resource */
274 bus->mmio_start = phb->mem_resources[0].start;
275 bus->mmio_end = phb->mem_resources[0].end + 1;
276
277 bus->pio_vstart = (unsigned long)phb->io_base_virt;
278 bus->pio_vend = bus->pio_vstart + phb->pci_io_size;
279
280 bus->regs = regs;
281
282	printk(KERN_INFO "PCI: Spider MMIO workaround for %s\n", np->full_name);
283
284 pr_debug(" mmio (P) = 0x%016lx..0x%016lx\n",
285 bus->mmio_start, bus->mmio_end);
286 pr_debug(" pio (V) = 0x%016lx..0x%016lx\n",
287 bus->pio_vstart, bus->pio_vend);
288 pr_debug(" regs (P) = 0x%016lx (V) = 0x%p\n",
289 rsrc.start + SPIDER_PCI_REG_BASE, bus->regs);
290
291 spider_pci_setup_chip(bus);
292}
293
294static struct ppc_pci_io __initdata spider_pci_io = {
295 .readb = spider_readb,
296 .readw = spider_readw,
297 .readl = spider_readl,
298 .readq = spider_readq,
299 .readw_be = spider_readw_be,
300 .readl_be = spider_readl_be,
301 .readq_be = spider_readq_be,
302 .readsb = spider_readsb,
303 .readsw = spider_readsw,
304 .readsl = spider_readsl,
305 .memcpy_fromio = spider_memcpy_fromio,
306};
307
308static int __init spider_pci_workaround_init(void)
309{
310 struct pci_controller *phb;
311
312 if (!machine_is(cell))
313 return 0;
314
315	/* Find spider bridges. We assume they have all been probed
316	 * in setup_arch(). If that were to change, we would need to
317 * update this code to cope with dynamically added busses
318 */
319 list_for_each_entry(phb, &hose_list, list_node) {
320 struct device_node *np = phb->arch_data;
321 const char *model = get_property(np, "model", NULL);
322
323 /* If no model property or name isn't exactly "pci", skip */
324 if (model == NULL || strcmp(np->name, "pci"))
325 continue;
326 /* If model is not "Spider", skip */
327 if (strcmp(model, "Spider"))
328 continue;
329 spider_pci_add_one(phb);
330 }
331
332 /* No Spider PCI found, exit */
333 if (spider_pci_count == 0)
334 return 0;
335
336	/* Set up IO callbacks. We only set up MMIO reads. PIO reads will
337	 * fall back to MMIO reads (though without a token, thus slower)
338 */
339 ppc_pci_io = spider_pci_io;
340
341 /* Setup ioremap callback */
342 ppc_md.ioremap = spider_ioremap;
343
344 return 0;
345}
346arch_initcall(spider_pci_workaround_init);
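Annotation: every read accessor above reduces to the same two-step pattern; a hypothetical sketch:

static u32 example_flushed_read(struct spider_pci_bus *bus,
				const volatile u32 __iomem *addr)
{
	u32 val = in_be32(addr);	/* the real MMIO read */

	/* a dummy read from the bridge forces completion of the
	 * preceding load, working around the bogus Spider behaviour */
	(void)in_be32(bus->regs + SPIDER_PCI_DUMMY_READ);
	return val;
}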
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index aca4c3db0dde..b43466ba8096 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1,514 +1,747 @@
1/* 1/*
2 * IOMMU implementation for Cell Broadband Processor Architecture 2 * IOMMU implementation for Cell Broadband Processor Architecture
3 * We just establish a linear mapping at boot by setting all the
4 * IOPT cache entries in the CPU.
5 * The mapping functions should be identical to pci_direct_iommu,
6 * except for the handling of the high order bit that is required
7 * by the Spider bridge. These should be split into a separate
8 * file at the point where we get a different bridge chip.
9 * 3 *
10 * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH, 4 * (C) Copyright IBM Corporation 2006
11 * Arnd Bergmann <arndb@de.ibm.com>
12 * 5 *
13 * Based on linear mapping 6 * Author: Jeremy Kerr <jk@ozlabs.org>
14 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
15 * 7 *
16 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or modify
17 * modify it under the terms of the GNU General Public License 9 * it under the terms of the GNU General Public License as published by
18 * as published by the Free Software Foundation; either version 10 * the Free Software Foundation; either version 2, or (at your option)
19 * 2 of the License, or (at your option) any later version. 11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 21 */
21 22
22#undef DEBUG 23#undef DEBUG
23 24
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/string.h>
28#include <linux/init.h> 26#include <linux/init.h>
29#include <linux/bootmem.h> 27#include <linux/interrupt.h>
30#include <linux/mm.h> 28#include <linux/notifier.h>
31#include <linux/dma-mapping.h>
32#include <linux/kernel.h>
33#include <linux/compiler.h>
34 29
35#include <asm/sections.h>
36#include <asm/iommu.h>
37#include <asm/io.h>
38#include <asm/prom.h> 30#include <asm/prom.h>
39#include <asm/pci-bridge.h> 31#include <asm/iommu.h>
40#include <asm/machdep.h> 32#include <asm/machdep.h>
41#include <asm/pmac_feature.h> 33#include <asm/pci-bridge.h>
42#include <asm/abs_addr.h>
43#include <asm/system.h>
44#include <asm/ppc-pci.h>
45#include <asm/udbg.h> 34#include <asm/udbg.h>
35#include <asm/of_platform.h>
36#include <asm/lmb.h>
46 37
47#include "iommu.h" 38#include "cbe_regs.h"
39#include "interrupt.h"
48 40
49static inline unsigned long 41/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
50get_iopt_entry(unsigned long real_address, unsigned long ioid, 42 * instead of leaving them mapped to some dummy page. This can be
51 unsigned long prot) 43 * enabled once the appropriate workarounds for spider bugs have
52{ 44 * been enabled
53 return (prot & IOPT_PROT_MASK) 45 */
54 | (IOPT_COHERENT) 46#define CELL_IOMMU_REAL_UNMAP
55 | (IOPT_ORDER_VC)
56 | (real_address & IOPT_RPN_MASK)
57 | (ioid & IOPT_IOID_MASK);
58}
59 47
60typedef struct { 48/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
61 unsigned long val; 49 * IO PTEs based on the transfer direction. That can be enabled
62} ioste; 50 * once spider-net has been fixed to pass the correct direction
51 * to the DMA mapping functions
52 */
53#define CELL_IOMMU_STRICT_PROTECTION
54
55
56#define NR_IOMMUS 2
57
58/* IOC mmap registers */
59#define IOC_Reg_Size 0x2000
60
61#define IOC_IOPT_CacheInvd 0x908
62#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul
63#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul
64#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul
65
66#define IOC_IOST_Origin 0x918
67#define IOC_IOST_Origin_E 0x8000000000000000ul
68#define IOC_IOST_Origin_HW 0x0000000000000800ul
69#define IOC_IOST_Origin_HL 0x0000000000000400ul
70
71#define IOC_IO_ExcpStat 0x920
72#define IOC_IO_ExcpStat_V 0x8000000000000000ul
73#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul
74#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul
75#define IOC_IO_ExcpStat_SPF_P 0x4000000000000000ul
76#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul
77#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul
78#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful
79
80#define IOC_IO_ExcpMask 0x928
81#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul
82#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul
83
84#define IOC_IOCmd_Offset 0x1000
85
86#define IOC_IOCmd_Cfg 0xc00
87#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul
88
89
90/* Segment table entries */
91#define IOSTE_V 0x8000000000000000ul /* valid */
92#define IOSTE_H 0x4000000000000000ul /* cache hint */
93#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */
94#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. pages in IOPT */
95#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */
96#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */
97#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */
98#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */
99#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */
100
101/* Page table entries */
102#define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
103#define IOPTE_PP_R 0x4000000000000000ul /* protection: read */
104#define IOPTE_M 0x2000000000000000ul /* coherency required */
105#define IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
106#define IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
107#define IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
108#define IOPTE_H 0x0000000000000800ul /* cache hint */
109#define IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
110
111
112/* IOMMU sizing */
113#define IO_SEGMENT_SHIFT 28
114#define IO_PAGENO_BITS (IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
115
116/* The high bit needs to be set on every DMA address */
117#define SPIDER_DMA_OFFSET 0x80000000ul
118
119struct iommu_window {
120 struct list_head list;
121 struct cbe_iommu *iommu;
122 unsigned long offset;
123 unsigned long size;
124 unsigned long pte_offset;
125 unsigned int ioid;
126 struct iommu_table table;
127};
63 128
64static inline ioste 129#define NAMESIZE 8
65mk_ioste(unsigned long val) 130struct cbe_iommu {
66{ 131 int nid;
67 ioste ioste = { .val = val, }; 132 char name[NAMESIZE];
68 return ioste; 133 void __iomem *xlate_regs;
69} 134 void __iomem *cmd_regs;
135 unsigned long *stab;
136 unsigned long *ptab;
137 void *pad_page;
138 struct list_head windows;
139};
140
141/* Static array of iommus, one per node
142 * each contains a list of windows, keyed from dma_window property
143 * - on bus setup, look for a matching window, or create one
144 * - on dev setup, assign iommu_table ptr
145 */
146static struct cbe_iommu iommus[NR_IOMMUS];
147static int cbe_nr_iommus;
70 148
71static inline ioste 149static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
72get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size) 150 long n_ptes)
73{ 151{
74 unsigned long ps; 152 unsigned long *reg, val;
75 unsigned long iostep; 153 long n;
76 unsigned long nnpt;
77 unsigned long shift;
78
79 switch (page_size) {
80 case 0x1000000:
81 ps = IOST_PS_16M;
82 nnpt = 0; /* one page per segment */
83 shift = 5; /* segment has 16 iopt entries */
84 break;
85
86 case 0x100000:
87 ps = IOST_PS_1M;
88 nnpt = 0; /* one page per segment */
89 shift = 1; /* segment has 256 iopt entries */
90 break;
91
92 case 0x10000:
93 ps = IOST_PS_64K;
94 nnpt = 0x07; /* 8 pages per io page table */
95 shift = 0; /* all entries are used */
96 break;
97
98 case 0x1000:
99 ps = IOST_PS_4K;
100 nnpt = 0x7f; /* 128 pages per io page table */
101 shift = 0; /* all entries are used */
102 break;
103
104 default: /* not a known compile time constant */
105 {
106 /* BUILD_BUG_ON() is not usable here */
107 extern void __get_iost_entry_bad_page_size(void);
108 __get_iost_entry_bad_page_size();
109 }
110 break;
111 }
112 154
113 iostep = iopt_base + 155 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
114 /* need 8 bytes per iopte */
115 (((io_address / page_size * 8)
116 /* align io page tables on 4k page boundaries */
117 << shift)
118 /* nnpt+1 pages go into each iopt */
119 & ~(nnpt << 12));
120
121 nnpt++; /* this seems to work, but the documentation is not clear
122 about wether we put nnpt or nnpt-1 into the ioste bits.
123 In theory, this can't work for 4k pages. */
124 return mk_ioste(IOST_VALID_MASK
125 | (iostep & IOST_PT_BASE_MASK)
126 | ((nnpt << 5) & IOST_NNPT_MASK)
127 | (ps & IOST_PS_MASK));
128}
129 156
130/* compute the address of an io pte */ 157 while (n_ptes > 0) {
131static inline unsigned long 158 /* we can invalidate up to 1 << 11 PTEs at once */
132get_ioptep(ioste iost_entry, unsigned long io_address) 159 n = min(n_ptes, 1l << 11);
133{ 160 val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
134 unsigned long iopt_base; 161 | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
135 unsigned long page_size; 162 | IOC_IOPT_CacheInvd_Busy;
136 unsigned long page_number;
137 unsigned long iopt_offset;
138
139 iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
140 page_size = iost_entry.val & IOST_PS_MASK;
141
142 /* decode page size to compute page number */
143 page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
144 /* page number is an offset into the io page table */
145 iopt_offset = (page_number << 3) & 0x7fff8ul;
146 return iopt_base + iopt_offset;
147}
148 163
149/* compute the tag field of the iopt cache entry */ 164 out_be64(reg, val);
150static inline unsigned long 165 while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
151get_ioc_tag(ioste iost_entry, unsigned long io_address) 166 ;
152{
153 unsigned long iopte = get_ioptep(iost_entry, io_address);
154 167
155 return IOPT_VALID_MASK 168 n_ptes -= n;
156 | ((iopte & 0x00000000000000ff8ul) >> 3) 169 pte += n;
157 | ((iopte & 0x0000003fffffc0000ul) >> 9); 170 }
158} 171}
159 172
160/* compute the hashed 6 bit index for the 4-way associative pte cache */ 173static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
161static inline unsigned long 174 unsigned long uaddr, enum dma_data_direction direction)
162get_ioc_hash(ioste iost_entry, unsigned long io_address)
163{ 175{
164 unsigned long iopte = get_ioptep(iost_entry, io_address); 176 int i;
165 177 unsigned long *io_pte, base_pte;
166 return ((iopte & 0x000000000000001f8ul) >> 3) 178 struct iommu_window *window =
167 ^ ((iopte & 0x00000000000020000ul) >> 17) 179 container_of(tbl, struct iommu_window, table);
168 ^ ((iopte & 0x00000000000010000ul) >> 15) 180
169 ^ ((iopte & 0x00000000000008000ul) >> 13) 181 /* implementing proper protection causes problems with the spidernet
170 ^ ((iopte & 0x00000000000004000ul) >> 11) 182 * driver - check mapping directions later, but allow read & write by
171 ^ ((iopte & 0x00000000000002000ul) >> 9) 183 * default for now.*/
172 ^ ((iopte & 0x00000000000001000ul) >> 7); 184#ifdef CELL_IOMMU_STRICT_PROTECTION
185	/* to avoid referencing a global, we use a trick here to set up the
186	 * protection bits. "prot" is set up as 3 fields of 4 bits appended
187	 * together, one for each of the 3 supported direction values. It is then
188	 * shifted left so that the field matching the desired direction
189	 * lands on the appropriate bits, and the other bits are masked out.
190 */
191 const unsigned long prot = 0xc48;
192 base_pte =
193 ((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
194 | IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
195#else
196 base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
197 (window->ioid & IOPTE_IOID_Mask);
198#endif
199
200 io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
201
202 for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
203 io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
204
205 mb();
206
207 invalidate_tce_cache(window->iommu, io_pte, npages);
208
209 pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
210 index, npages, direction, base_pte);
173} 211}
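/* Annotation (not part of the patch): worked decode of the "prot"
 * trick above. With prot = 0xc48 and a shift of 52 + 4 * direction,
 * the 4-bit field that lands in the PP bits (63:62) is:
 *   DMA_BIDIRECTIONAL (0): 0xc -> IOPTE_PP_R | IOPTE_PP_W
 *   DMA_TO_DEVICE     (1): 0x4 -> IOPTE_PP_R  (device reads memory)
 *   DMA_FROM_DEVICE   (2): 0x8 -> IOPTE_PP_W  (device writes memory)
 */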
174 212
175/* same as above, but pretend that we have a simpler 1-way associative 213static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
176 pte cache with an 8 bit index */
177static inline unsigned long
178get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
179{ 214{
180 unsigned long iopte = get_ioptep(iost_entry, io_address);
181
182 return ((iopte & 0x000000000000001f8ul) >> 3)
183 ^ ((iopte & 0x00000000000020000ul) >> 17)
184 ^ ((iopte & 0x00000000000010000ul) >> 15)
185 ^ ((iopte & 0x00000000000008000ul) >> 13)
186 ^ ((iopte & 0x00000000000004000ul) >> 11)
187 ^ ((iopte & 0x00000000000002000ul) >> 9)
188 ^ ((iopte & 0x00000000000001000ul) >> 7)
189 ^ ((iopte & 0x0000000000000c000ul) >> 8);
190}
191 215
192static inline ioste 216 int i;
193get_iost_cache(void __iomem *base, unsigned long index) 217 unsigned long *io_pte, pte;
194{ 218 struct iommu_window *window =
195 unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR); 219 container_of(tbl, struct iommu_window, table);
196 return mk_ioste(in_be64(&p[index]));
197}
198 220
199static inline void 221 pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);
200set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
201{
202 unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
203 pr_debug("ioste %02lx was %016lx, store %016lx", index,
204 get_iost_cache(base, index).val, ste.val);
205 out_be64(&p[index], ste.val);
206 pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
207}
208 222
209static inline unsigned long 223#ifdef CELL_IOMMU_REAL_UNMAP
210get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag) 224 pte = 0;
211{ 225#else
212 unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR); 226 /* spider bridge does PCI reads after freeing - insert a mapping
213 unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG); 227 * to a scratch page instead of an invalid entry */
228 pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
229 | (window->ioid & IOPTE_IOID_Mask);
230#endif
214 231
215 *tag = tags[index]; 232 io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
216 rmb();
217 return *p;
218}
219 233
220static inline void 234 for (i = 0; i < npages; i++)
221set_iopt_cache(void __iomem *base, unsigned long index, 235 io_pte[i] = pte;
222 unsigned long tag, unsigned long val) 236
223{ 237 mb();
224 unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
225 unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
226 238
227 out_be64(p, val); 239 invalidate_tce_cache(window->iommu, io_pte, npages);
228 out_be64(&tags[index], tag);
229} 240}
230 241
231static inline void 242static irqreturn_t ioc_interrupt(int irq, void *data)
232set_iost_origin(void __iomem *base)
233{ 243{
234 unsigned long __iomem *p = base + IOC_ST_ORIGIN; 244 unsigned long stat;
235 unsigned long origin = IOSTO_ENABLE | IOSTO_SW; 245 struct cbe_iommu *iommu = data;
236 246
237 pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin); 247 stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
238 out_be64(p, origin); 248
249 /* Might want to rate limit it */
250 printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
251 printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
252 !!(stat & IOC_IO_ExcpStat_V),
253 (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
254 (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
255 (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
256 (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
257 printk(KERN_ERR " page=0x%016lx\n",
258 stat & IOC_IO_ExcpStat_ADDR_Mask);
259
260 /* clear interrupt */
261 stat &= ~IOC_IO_ExcpStat_V;
262 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
263
264 return IRQ_HANDLED;
239} 265}
240 266
241static inline void 267static int cell_iommu_find_ioc(int nid, unsigned long *base)
242set_iocmd_config(void __iomem *base)
243{ 268{
244 unsigned long __iomem *p = base + 0xc00; 269 struct device_node *np;
245 unsigned long conf; 270 struct resource r;
271
272 *base = 0;
273
274 /* First look for new style /be nodes */
275 for_each_node_by_name(np, "ioc") {
276 if (of_node_to_nid(np) != nid)
277 continue;
278 if (of_address_to_resource(np, 0, &r)) {
279 printk(KERN_ERR "iommu: can't get address for %s\n",
280 np->full_name);
281 continue;
282 }
283 *base = r.start;
284 of_node_put(np);
285 return 0;
286 }
246 287
247 conf = in_be64(p); 288 /* Ok, let's try the old way */
248 pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE); 289 for_each_node_by_type(np, "cpu") {
249 out_be64(p, conf | IOCMD_CONF_TE); 290 const unsigned int *nidp;
291 const unsigned long *tmp;
292
293 nidp = get_property(np, "node-id", NULL);
294 if (nidp && *nidp == nid) {
295 tmp = get_property(np, "ioc-translation", NULL);
296 if (tmp) {
297 *base = *tmp;
298 of_node_put(np);
299 return 0;
300 }
301 }
302 }
303
304 return -ENODEV;
250} 305}
251 306
252static void enable_mapping(void __iomem *base, void __iomem *mmio_base) 307static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long size)
253{ 308{
254 set_iocmd_config(base); 309 struct page *page;
255 set_iost_origin(mmio_base); 310 int ret, i;
256} 311 unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages;
312 unsigned long xlate_base;
313 unsigned int virq;
314
315 if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
316 panic("%s: missing IOC register mappings for node %d\n",
317 __FUNCTION__, iommu->nid);
318
319 iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
320 iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
321
322 segments = size >> IO_SEGMENT_SHIFT;
323 pages_per_segment = 1ull << IO_PAGENO_BITS;
324
325 pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
326 __FUNCTION__, iommu->nid, segments, pages_per_segment);
327
328 /* set up the segment table */
329 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
330 BUG_ON(!page);
331 iommu->stab = page_address(page);
332 clear_page(iommu->stab);
333
334 /* ... and the page tables. Since these are contiguous, we can treat
335 * the page tables as one array of ptes, like pSeries does.
336 */
337 ptab_size = segments * pages_per_segment * sizeof(unsigned long);
338 pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
339 iommu->nid, ptab_size, get_order(ptab_size));
340 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
341 BUG_ON(!page);
342
343 iommu->ptab = page_address(page);
344 memset(iommu->ptab, 0, ptab_size);
345
346 /* allocate a bogus page for the end of each mapping */
347 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
348 BUG_ON(!page);
349 iommu->pad_page = page_address(page);
350 clear_page(iommu->pad_page);
351
352 /* number of pages needed for a page table */
353 n_pte_pages = (pages_per_segment *
354 sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
355
356 pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
357 __FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
358 n_pte_pages);
359
360 /* initialise the STEs */
361 reg = IOSTE_V | ((n_pte_pages - 1) << 5);
362
363 if (IOMMU_PAGE_SIZE == 0x1000)
364 reg |= IOSTE_PS_4K;
365 else if (IOMMU_PAGE_SIZE == 0x10000)
366 reg |= IOSTE_PS_64K;
367 else {
368 extern void __unknown_page_size_error(void);
369 __unknown_page_size_error();
370 }
371
372 pr_debug("Setting up IOMMU stab:\n");
373 for (i = 0; i * (1ul << IO_SEGMENT_SHIFT) < size; i++) {
374 iommu->stab[i] = reg |
375 (__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
376 pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
377 }
257 378
258static void iommu_dev_setup_null(struct pci_dev *d) { } 379 /* ensure that the STEs have updated */
259static void iommu_bus_setup_null(struct pci_bus *b) { } 380 mb();
260 381
261struct cell_iommu { 382 /* setup interrupts for the iommu. */
262 unsigned long base; 383 reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
263 unsigned long mmio_base; 384 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
264 void __iomem *mapped_base; 385 reg & ~IOC_IO_ExcpStat_V);
265 void __iomem *mapped_mmio_base; 386 out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
266}; 387 IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);
267 388
268static struct cell_iommu cell_iommus[NR_CPUS]; 389 virq = irq_create_mapping(NULL,
390 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
391 BUG_ON(virq == NO_IRQ);
269 392
270/* initialize the iommu to support a simple linear mapping 393 ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
271 * for each DMA window used by any device. For now, we 394 iommu->name, iommu);
272 * happen to know that there is only one DMA window in use, 395 BUG_ON(ret);
273 * starting at iopt_phys_offset. */
274static void cell_do_map_iommu(struct cell_iommu *iommu,
275 unsigned int ioid,
276 unsigned long map_start,
277 unsigned long map_size)
278{
279 unsigned long io_address, real_address;
280 void __iomem *ioc_base, *ioc_mmio_base;
281 ioste ioste;
282 unsigned long index;
283 396
284 /* we pretend the io page table was at a very high address */ 397 /* set the IOC segment table origin register (and turn on the iommu) */
285 const unsigned long fake_iopt = 0x10000000000ul; 398 reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
286 const unsigned long io_page_size = 0x1000000; /* use 16M pages */ 399 out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
287 const unsigned long io_segment_size = 0x10000000; /* 256M */ 400 in_be64(iommu->xlate_regs + IOC_IOST_Origin);
288 401
289 ioc_base = iommu->mapped_base; 402 /* turn on IO translation */
290 ioc_mmio_base = iommu->mapped_mmio_base; 403 reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
291 404 out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
292 for (real_address = 0, io_address = map_start;
293 io_address <= map_start + map_size;
294 real_address += io_page_size, io_address += io_page_size) {
295 ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
296 if ((real_address % io_segment_size) == 0) /* segment start */
297 set_iost_cache(ioc_mmio_base,
298 io_address >> 28, ioste);
299 index = get_ioc_hash_1way(ioste, io_address);
300 pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
301 io_address, index, ioste.val);
302 set_iopt_cache(ioc_mmio_base,
303 get_ioc_hash_1way(ioste, io_address),
304 get_ioc_tag(ioste, io_address),
305 get_iopt_entry(real_address, ioid, IOPT_PROT_RW));
306 }
307} 405}
308 406
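/* Annotation (not part of the patch): worked sizing example. Assuming
 * 4kB IOMMU pages (IOMMU_PAGE_SHIFT = 12), IO_SEGMENT_SHIFT = 28 gives
 * IO_PAGENO_BITS = 16, i.e. 65536 ptes (512kB of page table) per 256MB
 * segment and n_pte_pages = 128; a 1GB window thus takes 4 STEs. */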
309static void iommu_devnode_setup(struct device_node *d) 407#if 0/* Unused for now */
408static struct iommu_window *find_window(struct cbe_iommu *iommu,
409 unsigned long offset, unsigned long size)
310{ 410{
311 const unsigned int *ioid; 411 struct iommu_window *window;
312 unsigned long map_start, map_size, token;
313 const unsigned long *dma_window;
314 struct cell_iommu *iommu;
315 412
316 ioid = get_property(d, "ioid", NULL); 413 /* todo: check for overlapping (but not equal) windows) */
317 if (!ioid)
318 pr_debug("No ioid entry found !\n");
319 414
320 dma_window = get_property(d, "ibm,dma-window", NULL); 415 list_for_each_entry(window, &(iommu->windows), list) {
321 if (!dma_window) 416 if (window->offset == offset && window->size == size)
322 pr_debug("No ibm,dma-window entry found !\n"); 417 return window;
418 }
323 419
324 map_start = dma_window[1]; 420 return NULL;
325 map_size = dma_window[2]; 421}
326 token = dma_window[0] >> 32; 422#endif
327 423
328 iommu = &cell_iommus[token]; 424static struct iommu_window * __init
425cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
426 unsigned long offset, unsigned long size,
427 unsigned long pte_offset)
428{
429 struct iommu_window *window;
430 const unsigned int *ioid;
329 431
330 cell_do_map_iommu(iommu, *ioid, map_start, map_size); 432 ioid = get_property(np, "ioid", NULL);
433 if (ioid == NULL)
434 printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
435 np->full_name);
436
437 window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
438 BUG_ON(window == NULL);
439
440 window->offset = offset;
441 window->size = size;
442 window->ioid = ioid ? *ioid : 0;
443 window->iommu = iommu;
444 window->pte_offset = pte_offset;
445
446 window->table.it_blocksize = 16;
447 window->table.it_base = (unsigned long)iommu->ptab;
448 window->table.it_index = iommu->nid;
449 window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
450 window->pte_offset;
451 window->table.it_size = size >> IOMMU_PAGE_SHIFT;
452
453 iommu_init_table(&window->table, iommu->nid);
454
455 pr_debug("\tioid %d\n", window->ioid);
456 pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
457 pr_debug("\tbase 0x%016lx\n", window->table.it_base);
458 pr_debug("\toffset 0x%lx\n", window->table.it_offset);
459 pr_debug("\tsize %ld\n", window->table.it_size);
460
461 list_add(&window->list, &iommu->windows);
462
463 if (offset != 0)
464 return window;
465
466 /* We need to map and reserve the first IOMMU page since it's used
467 * by the spider workaround. In theory, we only need to do that when
468 * running on spider but it doesn't really matter.
469 *
470 * This code also assumes that we have a window that starts at 0,
471 * which is the case on all spider based blades.
472 */
473 __set_bit(0, window->table.it_map);
474 tce_build_cell(&window->table, window->table.it_offset, 1,
475 (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
476 window->table.it_hint = window->table.it_blocksize;
477
478 return window;
331}
332
333static void iommu_bus_setup(struct pci_bus *b)
334{
335	struct device_node *d = (struct device_node *)b->sysdata;
336	iommu_devnode_setup(d);
337}
338
339
340static int cell_map_iommu_hardcoded(int num_nodes)
341{
342	struct cell_iommu *iommu = NULL;
343
344	pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);
479}
480
481static struct cbe_iommu *cell_iommu_for_node(int nid)
482{
483	int i;
484
485	for (i = 0; i < cbe_nr_iommus; i++)
486		if (iommus[i].nid == nid)
487			return &iommus[i];
488	return NULL;
489}
490
491static void cell_dma_dev_setup(struct device *dev)
492{
493	struct iommu_window *window;
494	struct cbe_iommu *iommu;
495	struct dev_archdata *archdata = &dev->archdata;
496
497 /* If we run without iommu, no need to do anything */
498 if (pci_dma_ops == &dma_direct_ops)
499 return;
500
501 /* Current implementation uses the first window available in that
502 * node's iommu. We -might- do something smarter later though it may
503 * never be necessary
504 */
505 iommu = cell_iommu_for_node(archdata->numa_node);
506 if (iommu == NULL || list_empty(&iommu->windows)) {
507 printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
508 archdata->of_node ? archdata->of_node->full_name : "?",
509 archdata->numa_node);
510 return;
511 }
512 window = list_entry(iommu->windows.next, struct iommu_window, list);
345
346	/* node 0 */
347	iommu = &cell_iommus[0];
348	iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
349	iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);
350
351	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
352
353	cell_do_map_iommu(iommu, 0x048a,
354			0x20000000ul, 0x20000000ul);
355
356	if (num_nodes < 2)
357		return 0;
358
359	/* node 1 */
360	iommu = &cell_iommus[1];
361	iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
362	iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);
363
364	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
365
366	cell_do_map_iommu(iommu, 0x048a,
367			0x20000000, 0x20000000ul);
368
369	return 0;
370}
371
372
513
514	archdata->dma_data = &window->table;
515}
516
517static void cell_pci_dma_dev_setup(struct pci_dev *dev)
518{
519	cell_dma_dev_setup(&dev->dev);
520}
521
522static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
523			void *data)
524{
525	struct device *dev = data;
526
527	/* We are only interested in device addition */
528	if (action != BUS_NOTIFY_ADD_DEVICE)
529		return 0;
530
531	/* We use the PCI DMA ops */
532	dev->archdata.dma_ops = pci_dma_ops;
533
534	cell_dma_dev_setup(dev);
535
536	return 0;
537}
538
539static struct notifier_block cell_of_bus_notifier = {
540	.notifier_call = cell_of_bus_notify
541};
542
373static int cell_map_iommu(void)
374{
375	unsigned int num_nodes = 0;
376	const unsigned int *node_id;
377	const unsigned long *base, *mmio_base;
378	struct device_node *dn;
379	struct cell_iommu *iommu = NULL;
380
381	/* determine number of nodes (=iommus) */
382	pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
383	for (dn = of_find_node_by_type(NULL, "cpu");
384	     dn;
385	     dn = of_find_node_by_type(dn, "cpu")) {
386		node_id = get_property(dn, "node-id", NULL);
387
388		if (num_nodes < *node_id)
389			num_nodes = *node_id;
390	}
391
392	num_nodes++;
393	pr_debug("%i found.\n", num_nodes);
394
395	/* map the iommu registers for each node */
396	pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__);
397	for (dn = of_find_node_by_type(NULL, "cpu");
398	     dn;
399	     dn = of_find_node_by_type(dn, "cpu")) {
400
401		node_id = get_property(dn, "node-id", NULL);
402		base = get_property(dn, "ioc-cache", NULL);
403		mmio_base = get_property(dn, "ioc-translation", NULL);
404
405		if (!base || !mmio_base || !node_id)
406			return cell_map_iommu_hardcoded(num_nodes);
543static int __init cell_iommu_get_window(struct device_node *np,
544			unsigned long *base,
545			unsigned long *size)
546{
547	const void *dma_window;
548	unsigned long index;
549
550	/* Use ibm,dma-window if available, else, hard code ! */
551	dma_window = get_property(np, "ibm,dma-window", NULL);
552	if (dma_window == NULL) {
553		*base = 0;
554		*size = 0x80000000u;
555		return -ENODEV;
556	}
557
558	of_parse_dma_window(np, dma_window, &index, base, size);
559	return 0;
560}
561
562static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset)
563{
564 struct cbe_iommu *iommu;
565 unsigned long base, size;
566 int nid, i;
567
568 /* Get node ID */
569 nid = of_node_to_nid(np);
570 if (nid < 0) {
571 printk(KERN_ERR "iommu: failed to get node for %s\n",
572 np->full_name);
573 return;
574 }
575 pr_debug("iommu: setting up iommu for node %d (%s)\n",
576 nid, np->full_name);
577
578 /* XXX todo: If we can have multiple windows on the same IOMMU, which
579	 * isn't the case today, we probably want here to check whether the
580	 * iommu for that node is already set up.
581	 * However, there might be an issue with getting the size right so let's
582 * ignore that for now. We might want to completely get rid of the
583 * multiple window support since the cell iommu supports per-page ioids
584 */
585
586 if (cbe_nr_iommus >= NR_IOMMUS) {
587 printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
588 np->full_name);
589 return;
590 }
407
408		iommu = &cell_iommus[*node_id];
409		iommu->base = *base;
410		iommu->mmio_base = *mmio_base;
411
412		iommu->mapped_base = ioremap(*base, 0x1000);
413		iommu->mapped_mmio_base = ioremap(*mmio_base, 0x1000);
414
415		enable_mapping(iommu->mapped_base,
416			iommu->mapped_mmio_base);
417
418		/* everything else will be done in iommu_bus_setup */
419	}
420
421	return 1;
422}
591
592	/* Init base fields */
593	i = cbe_nr_iommus++;
594	iommu = &iommus[i];
595	iommu->stab = 0;
596	iommu->nid = nid;
597	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
598	INIT_LIST_HEAD(&iommu->windows);
599
600	/* Obtain a window for it */
601	cell_iommu_get_window(np, &base, &size);
602
603	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
604		base, base + size - 1);
605
606	/* Initialize the hardware */
607	cell_iommu_setup_hardware(iommu, size);
608
609	/* Setup the iommu_table */
610	cell_iommu_setup_window(iommu, np, base, size,
611			offset >> IOMMU_PAGE_SHIFT);
612}
423
424static void *cell_alloc_coherent(struct device *hwdev, size_t size,
425			dma_addr_t *dma_handle, gfp_t flag)
426{
427	void *ret;
428
429	ret = (void *)__get_free_pages(flag, get_order(size));
430	if (ret != NULL) {
431		memset(ret, 0, size);
432		*dma_handle = virt_to_abs(ret) | CELL_DMA_VALID;
433	}
434	return ret;
435}
613
614static void __init cell_disable_iommus(void)
615{
616	int node;
617	unsigned long base, val;
618	void __iomem *xregs, *cregs;
619
620	/* Make sure IOC translation is disabled on all nodes */
621	for_each_online_node(node) {
622		if (cell_iommu_find_ioc(node, &base))
623			continue;
624		xregs = ioremap(base, IOC_Reg_Size);
625		if (xregs == NULL)
626			continue;
627		cregs = xregs + IOC_IOCmd_Offset;
628
629		pr_debug("iommu: cleaning up iommu on node %d\n", node);
630
631		out_be64(xregs + IOC_IOST_Origin, 0);
632		(void)in_be64(xregs + IOC_IOST_Origin);
633		val = in_be64(cregs + IOC_IOCmd_Cfg);
634		val &= ~IOC_IOCmd_Cfg_TE;
635		out_be64(cregs + IOC_IOCmd_Cfg, val);
636		(void)in_be64(cregs + IOC_IOCmd_Cfg);
637
638		iounmap(xregs);
639	}
640}
436
437static void cell_free_coherent(struct device *hwdev, size_t size,
438			void *vaddr, dma_addr_t dma_handle)
439{
440	free_pages((unsigned long)vaddr, get_order(size));
441}
641
642static int __init cell_iommu_init_disabled(void)
643{
644	struct device_node *np = NULL;
645	unsigned long base = 0, size;
646
647 /* When no iommu is present, we use direct DMA ops */
648 pci_dma_ops = &dma_direct_ops;
649
650 /* First make sure all IOC translation is turned off */
651 cell_disable_iommus();
652
653 /* If we have no Axon, we set up the spider DMA magic offset */
654 if (of_find_node_by_name(NULL, "axon") == NULL)
655 dma_direct_offset = SPIDER_DMA_OFFSET;
656
657 /* Now we need to check to see where the memory is mapped
658 * in PCI space. We assume that all busses use the same dma
659 * window which is always the case so far on Cell, thus we
660 * pick up the first pci-internal node we can find and check
661 * the DMA window from there.
662 */
663 for_each_node_by_name(np, "axon") {
664 if (np->parent == NULL || np->parent->parent != NULL)
665 continue;
666 if (cell_iommu_get_window(np, &base, &size) == 0)
667 break;
668 }
669 if (np == NULL) {
670 for_each_node_by_name(np, "pci-internal") {
671 if (np->parent == NULL || np->parent->parent != NULL)
672 continue;
673 if (cell_iommu_get_window(np, &base, &size) == 0)
674 break;
675 }
676 }
677 of_node_put(np);
678
679 /* If we found a DMA window, we check if it's big enough to enclose
680 * all of physical memory. If not, we force enable IOMMU
681 */
682 if (np && size < lmb_end_of_DRAM()) {
683 printk(KERN_WARNING "iommu: force-enabled, dma window"
684 " (%ldMB) smaller than total memory (%ldMB)\n",
685 size >> 20, lmb_end_of_DRAM() >> 20);
686 return -ENODEV;
687 }
442
443static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
444		size_t size, enum dma_data_direction direction)
445{
446	return virt_to_abs(ptr) | CELL_DMA_VALID;
447}
448
449static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
450		size_t size, enum dma_data_direction direction)
451{
452}
688
689	dma_direct_offset += base;
690
691	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
692		dma_direct_offset);
693
694	return 0;
695}
453
454static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
455		int nents, enum dma_data_direction direction)
456{
457	int i;
696
697static int __init cell_iommu_init(void)
698{
699	struct device_node *np;
700
701 if (!machine_is(cell))
702 return -ENODEV;
703
704 /* If IOMMU is disabled or we have little enough RAM to not need
705 * to enable it, we setup a direct mapping.
706 *
707 * Note: should we make sure we have the IOMMU actually disabled ?
708 */
709 if (iommu_is_off ||
710 (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
711 if (cell_iommu_init_disabled() == 0)
712 goto bail;
713
714 /* Setup various ppc_md. callbacks */
715 ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
716 ppc_md.tce_build = tce_build_cell;
717 ppc_md.tce_free = tce_free_cell;
718
719 /* Create an iommu for each /axon node. */
720 for_each_node_by_name(np, "axon") {
721 if (np->parent == NULL || np->parent->parent != NULL)
722 continue;
723 cell_iommu_init_one(np, 0);
724 }
458
459	for (i = 0; i < nents; i++, sg++) {
460		sg->dma_address = (page_to_phys(sg->page) + sg->offset)
461					| CELL_DMA_VALID;
462		sg->dma_length = sg->length;
463	}
464
465	return nents;
466}
467
468static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
469		int nents, enum dma_data_direction direction)
470{
471}
472
473static int cell_dma_supported(struct device *dev, u64 mask)
474{
475	return mask < 0x100000000ull;
476}
725
726	/* Create an iommu for each toplevel /pci-internal node for
727	 * old hardware/firmware
728	 */
729	for_each_node_by_name(np, "pci-internal") {
730		if (np->parent == NULL || np->parent->parent != NULL)
731			continue;
732		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
733	}
734
735	/* Setup default PCI iommu ops */
736	pci_dma_ops = &dma_iommu_ops;
737
738 bail:
739	/* Register callbacks on OF platform device addition/removal
740	 * to handle linking them to the right DMA operations
741	 */
742	bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);
743
744	return 0;
745}
746arch_initcall(cell_iommu_init);
477
747
478static struct dma_mapping_ops cell_iommu_ops = {
479 .alloc_coherent = cell_alloc_coherent,
480 .free_coherent = cell_free_coherent,
481 .map_single = cell_map_single,
482 .unmap_single = cell_unmap_single,
483 .map_sg = cell_map_sg,
484 .unmap_sg = cell_unmap_sg,
485 .dma_supported = cell_dma_supported,
486};
487
488void cell_init_iommu(void)
489{
490 int setup_bus = 0;
491
492 if (of_find_node_by_path("/mambo")) {
493 pr_info("Not using iommu on systemsim\n");
494 } else {
495
496 if (!(of_chosen &&
497 get_property(of_chosen, "linux,iommu-off", NULL)))
498 setup_bus = cell_map_iommu();
499
500 if (setup_bus) {
501 pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__);
502 ppc_md.iommu_dev_setup = iommu_dev_setup_null;
503 ppc_md.iommu_bus_setup = iommu_bus_setup;
504 } else {
505 pr_debug("%s: IOMMU mapping activated, "
506 "no device action necessary\n", __FUNCTION__);
507 /* Direct I/O, IOMMU off */
508 ppc_md.iommu_dev_setup = iommu_dev_setup_null;
509 ppc_md.iommu_bus_setup = iommu_bus_setup_null;
510 }
511 }
512
513 pci_dma_ops = cell_iommu_ops;
514}
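
For reference, cell_of_bus_notify above uses the stock driver-model notifier pattern; a minimal sketch of that pattern follows, with a hypothetical callback and message (none of these names are part of this patch):

#include <linux/device.h>
#include <linux/notifier.h>

/* Hypothetical: called for every event on a bus; we only act on adds. */
static int example_bus_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;		/* ignore removals and other events */

	dev_info(dev, "added; DMA ops could be attached here\n");
	return 0;
}

static struct notifier_block example_nb = {
	.notifier_call = example_bus_notify,
};

/* Registered once, e.g. from an initcall:
 *	bus_register_notifier(&of_platform_bus_type, &example_nb);
 */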
diff --git a/arch/powerpc/platforms/cell/iommu.h b/arch/powerpc/platforms/cell/iommu.h
deleted file mode 100644
index 490d77abfe85..000000000000
--- a/arch/powerpc/platforms/cell/iommu.h
+++ /dev/null
@@ -1,65 +0,0 @@
1#ifndef CELL_IOMMU_H
2#define CELL_IOMMU_H
3
4/* some constants */
5enum {
6 /* segment table entries */
7 IOST_VALID_MASK = 0x8000000000000000ul,
8 IOST_TAG_MASK = 0x3000000000000000ul,
9 IOST_PT_BASE_MASK = 0x000003fffffff000ul,
10 IOST_NNPT_MASK = 0x0000000000000fe0ul,
11 IOST_PS_MASK = 0x000000000000000ful,
12
13 IOST_PS_4K = 0x1,
14 IOST_PS_64K = 0x3,
15 IOST_PS_1M = 0x5,
16 IOST_PS_16M = 0x7,
17
18 /* iopt tag register */
19 IOPT_VALID_MASK = 0x0000000200000000ul,
20 IOPT_TAG_MASK = 0x00000001fffffffful,
21
22 /* iopt cache register */
23 IOPT_PROT_MASK = 0xc000000000000000ul,
24 IOPT_PROT_NONE = 0x0000000000000000ul,
25 IOPT_PROT_READ = 0x4000000000000000ul,
26 IOPT_PROT_WRITE = 0x8000000000000000ul,
27 IOPT_PROT_RW = 0xc000000000000000ul,
28 IOPT_COHERENT = 0x2000000000000000ul,
29
30 IOPT_ORDER_MASK = 0x1800000000000000ul,
31 /* order access to same IOID/VC on same address */
32 IOPT_ORDER_ADDR = 0x0800000000000000ul,
33 /* similar, but only after a write access */
34 IOPT_ORDER_WRITES = 0x1000000000000000ul,
35 /* Order all accesses to same IOID/VC */
36 IOPT_ORDER_VC = 0x1800000000000000ul,
37
38 IOPT_RPN_MASK = 0x000003fffffff000ul,
39 IOPT_HINT_MASK = 0x0000000000000800ul,
40 IOPT_IOID_MASK = 0x00000000000007fful,
41
42 IOSTO_ENABLE = 0x8000000000000000ul,
43 IOSTO_ORIGIN = 0x000003fffffff000ul,
44 IOSTO_HW = 0x0000000000000800ul,
45 IOSTO_SW = 0x0000000000000400ul,
46
47 IOCMD_CONF_TE = 0x0000800000000000ul,
48
49 /* memory mapped registers */
50 IOC_PT_CACHE_DIR = 0x000,
51 IOC_ST_CACHE_DIR = 0x800,
52 IOC_PT_CACHE_REG = 0x910,
53 IOC_ST_ORIGIN = 0x918,
54 IOC_CONF = 0x930,
55
56 /* The high bit needs to be set on every DMA address,
57 only 2GB are addressable */
58 CELL_DMA_VALID = 0x80000000,
59 CELL_DMA_MASK = 0x7fffffff,
60};
61
62
63void cell_init_iommu(void);
64
65#endif
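
The deleted header built its I/O page-table entries by OR-ing these masks together; an illustrative sketch of how a read-write, cache-coherent IOPT entry would be composed (the helper name is ours, not the kernel's):

/* Illustrative only: compose a 64-bit IOPT entry from the masks above. */
static unsigned long example_iopt_entry(unsigned long rpn, unsigned int ioid)
{
	unsigned long e;

	e  = IOPT_PROT_RW;			/* DMA reads and writes allowed */
	e |= IOPT_COHERENT;			/* keep CPU caches coherent */
	e |= IOPT_ORDER_VC;			/* order all accesses per IOID/VC */
	e |= rpn & IOPT_RPN_MASK;		/* real page number */
	e |= (unsigned long)ioid & IOPT_IOID_MASK;	/* I/O identifier */

	return e;
}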
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 9f2e4ed20a57..8c20f0fb8651 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -38,32 +38,25 @@
38#include "pervasive.h" 38#include "pervasive.h"
39#include "cbe_regs.h" 39#include "cbe_regs.h"
40 40
41static DEFINE_SPINLOCK(cbe_pervasive_lock); 41static void cbe_power_save(void)
42
43static void __init cbe_enable_pause_zero(void)
44{ 42{
45 unsigned long thread_switch_control; 43 unsigned long ctrl, thread_switch_control;
46 unsigned long temp_register;
47 struct cbe_pmd_regs __iomem *pregs;
48
49 spin_lock_irq(&cbe_pervasive_lock);
50 pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
51 if (pregs == NULL)
52 goto out;
53 44
54 pr_debug("Power Management: CPU %d\n", smp_processor_id()); 45 /*
55 46 * We need to hard disable interrupts, but we also need to mark them
56 /* Enable Pause(0) control bit */ 47 * hard disabled in the PACA so that the local_irq_enable() done by
57 temp_register = in_be64(&pregs->pm_control); 48 * our caller upon return propertly hard enables.
49 */
50 hard_irq_disable();
51 get_paca()->hard_enabled = 0;
58 52
59 out_be64(&pregs->pm_control, 53 ctrl = mfspr(SPRN_CTRLF);
60 temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
61 54
62 /* Enable DEC and EE interrupt request */ 55 /* Enable DEC and EE interrupt request */
63 thread_switch_control = mfspr(SPRN_TSC_CELL); 56 thread_switch_control = mfspr(SPRN_TSC_CELL);
64 thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST; 57 thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
65 58
66 switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) { 59 switch (ctrl & CTRL_CT) {
67 case CTRL_CT0: 60 case CTRL_CT0:
68 thread_switch_control |= TSC_CELL_DEC_ENABLE_0; 61 thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
69 break; 62 break;
@@ -75,58 +68,21 @@ static void __init cbe_enable_pause_zero(void)
75			__FUNCTION__);
76		break;
77	}
78
79	mtspr(SPRN_TSC_CELL, thread_switch_control);
80
81out:
82	spin_unlock_irq(&cbe_pervasive_lock);
83}
84
85static void cbe_idle(void)
86{
87	unsigned long ctrl;
88
89	/* Why do we do that on every idle ? Couldn't that be done once for
90	 * all or do we lose the state some way ? Also, the pm_control
91	 * register setting, that can't be set once at boot ? We really want
92	 * to move that away in order to implement a simple powersave
93	 */
94	cbe_enable_pause_zero();
95
96	while (1) {
97		if (!need_resched()) {
98			local_irq_disable();
99			while (!need_resched()) {
100				/* go into low thread priority */
101				HMT_low();
102
103				/*
104				 * atomically disable thread execution
105				 * and runlatch.
106				 * External and Decrementer exceptions
107				 * are still handled when the thread
108				 * is disabled but now enter in
109				 * cbe_system_reset_exception()
110				 */
111				ctrl = mfspr(SPRN_CTRLF);
112				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
113				mtspr(SPRN_CTRLT, ctrl);
114			}
115			/* restore thread prio */
116			HMT_medium();
117			local_irq_enable();
118		}
119
120		/*
121		 * turn runlatch on again before scheduling the
122		 * process we just woke up
123		 */
124		ppc64_runlatch_on();
125
126		preempt_enable_no_resched();
127		schedule();
128		preempt_disable();
129	}
130}
68			__FUNCTION__);
69		break;
70	}
71	mtspr(SPRN_TSC_CELL, thread_switch_control);
72
73	/*
74	 * go into low thread priority, medium priority will be
75	 * restored for us after wake-up.
76	 */
77	HMT_low();
78
79	/*
80	 * atomically disable thread execution and runlatch.
81	 * External and Decrementer exceptions are still handled when the
82	 * thread is disabled but now enter in cbe_system_reset_exception()
83	 */
84	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
85	mtspr(SPRN_CTRLT, ctrl);
86}
131 87
132static int cbe_system_reset_exception(struct pt_regs *regs) 88static int cbe_system_reset_exception(struct pt_regs *regs)
@@ -158,9 +114,20 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
158
159void __init cbe_pervasive_init(void)
160{
161	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
162		return;
163
164	ppc_md.idle_loop = cbe_idle;
165	ppc_md.system_reset_exception = cbe_system_reset_exception;
166}
114
115void __init cbe_pervasive_init(void)
116{
117	int cpu;
118	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
119		return;
120
121	for_each_possible_cpu(cpu) {
122		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
123		if (!regs)
124			continue;
125
126		/* Enable Pause(0) control bit */
127		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
128			CBE_PMD_PAUSE_ZERO_CONTROL);
129	}
130
131	ppc_md.power_save = cbe_power_save;
132	ppc_md.system_reset_exception = cbe_system_reset_exception;
133}
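
The switch from a private cbe_idle() loop to a ppc_md.power_save hook leans on the generic powerpc idle loop to do the need_resched() and runlatch bookkeeping. Roughly, and only as a hedged sketch of that era's contract (the real loop lives in arch/powerpc/kernel/idle.c):

/* Sketch: how the generic idle loop drives the hook installed above. */
while (!need_resched()) {
	local_irq_disable();
	if (!need_resched() && ppc_md.power_save)
		ppc_md.power_save();	/* e.g. cbe_power_save() */
	local_irq_enable();
}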
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
new file mode 100644
index 000000000000..d04ae1671e6c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -0,0 +1,432 @@
1/*
2 * Cell Broadband Engine Performance Monitor
3 *
4 * (C) Copyright IBM Corporation 2001,2006
5 *
6 * Author:
7 * David Erb (djerb@us.ibm.com)
8 * Kevin Corry (kevcorry@us.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/interrupt.h>
26#include <linux/types.h>
27#include <asm/io.h>
28#include <asm/irq_regs.h>
29#include <asm/machdep.h>
30#include <asm/pmc.h>
31#include <asm/reg.h>
32#include <asm/spu.h>
33
34#include "cbe_regs.h"
35#include "interrupt.h"
36
37/*
38 * When writing to write-only mmio addresses, save a shadow copy. All of the
39 * registers are 32-bit, but stored in the upper-half of a 64-bit field in
40 * pmd_regs.
41 */
42
43#define WRITE_WO_MMIO(reg, x) \
44 do { \
45 u32 _x = (x); \
46 struct cbe_pmd_regs __iomem *pmd_regs; \
47 struct cbe_pmd_shadow_regs *shadow_regs; \
48 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
49 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
50 out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \
51 shadow_regs->reg = _x; \
52 } while (0)
53
54#define READ_SHADOW_REG(val, reg) \
55 do { \
56 struct cbe_pmd_shadow_regs *shadow_regs; \
57 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
58 (val) = shadow_regs->reg; \
59 } while (0)
60
61#define READ_MMIO_UPPER32(val, reg) \
62 do { \
63 struct cbe_pmd_regs __iomem *pmd_regs; \
64 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
65 (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \
66 } while (0)
67
68/*
69 * Physical counter registers.
70 * Each physical counter can act as one 32-bit counter or two 16-bit counters.
71 */
72
73u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
74{
75 u32 val_in_latch, val = 0;
76
77 if (phys_ctr < NR_PHYS_CTRS) {
78 READ_SHADOW_REG(val_in_latch, counter_value_in_latch);
79
80 /* Read the latch or the actual counter, whichever is newer. */
81 if (val_in_latch & (1 << phys_ctr)) {
82 READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
83 } else {
84 READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
85 }
86 }
87
88 return val;
89}
90EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);
91
92void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
93{
94 struct cbe_pmd_shadow_regs *shadow_regs;
95 u32 pm_ctrl;
96
97 if (phys_ctr < NR_PHYS_CTRS) {
98 /* Writing to a counter only writes to a hardware latch.
99 * The new value is not propagated to the actual counter
100 * until the performance monitor is enabled.
101 */
102 WRITE_WO_MMIO(pm_ctr[phys_ctr], val);
103
104 pm_ctrl = cbe_read_pm(cpu, pm_control);
105 if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
106 /* The counters are already active, so we need to
107 * rewrite the pm_control register to "re-enable"
108 * the PMU.
109 */
110 cbe_write_pm(cpu, pm_control, pm_ctrl);
111 } else {
112 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
113 shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
114 }
115 }
116}
117EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);
118
119/*
120 * "Logical" counter registers.
121 * These will read/write 16-bits or 32-bits depending on the
122 * current size of the counter. Counters 4 - 7 are always 16-bit.
123 */
124
125u32 cbe_read_ctr(u32 cpu, u32 ctr)
126{
127 u32 val;
128 u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
129
130 val = cbe_read_phys_ctr(cpu, phys_ctr);
131
132 if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
133 val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
134
135 return val;
136}
137EXPORT_SYMBOL_GPL(cbe_read_ctr);
138
139void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
140{
141 u32 phys_ctr;
142 u32 phys_val;
143
144 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
145
146 if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
147 phys_val = cbe_read_phys_ctr(cpu, phys_ctr);
148
149 if (ctr < NR_PHYS_CTRS)
150 val = (val << 16) | (phys_val & 0xffff);
151 else
152 val = (val & 0xffff) | (phys_val & 0xffff0000);
153 }
154
155 cbe_write_phys_ctr(cpu, phys_ctr, val);
156}
157EXPORT_SYMBOL_GPL(cbe_write_ctr);
158
159/*
160 * Counter-control registers.
161 * Each "logical" counter has a corresponding control register.
162 */
163
164u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
165{
166 u32 pm07_control = 0;
167
168 if (ctr < NR_CTRS)
169 READ_SHADOW_REG(pm07_control, pm07_control[ctr]);
170
171 return pm07_control;
172}
173EXPORT_SYMBOL_GPL(cbe_read_pm07_control);
174
175void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
176{
177 if (ctr < NR_CTRS)
178 WRITE_WO_MMIO(pm07_control[ctr], val);
179}
180EXPORT_SYMBOL_GPL(cbe_write_pm07_control);
181
182/*
183 * Other PMU control registers. Most of these are write-only.
184 */
185
186u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
187{
188 u32 val = 0;
189
190 switch (reg) {
191 case group_control:
192 READ_SHADOW_REG(val, group_control);
193 break;
194
195 case debug_bus_control:
196 READ_SHADOW_REG(val, debug_bus_control);
197 break;
198
199 case trace_address:
200 READ_MMIO_UPPER32(val, trace_address);
201 break;
202
203 case ext_tr_timer:
204 READ_SHADOW_REG(val, ext_tr_timer);
205 break;
206
207 case pm_status:
208 READ_MMIO_UPPER32(val, pm_status);
209 break;
210
211 case pm_control:
212 READ_SHADOW_REG(val, pm_control);
213 break;
214
215 case pm_interval:
216 READ_SHADOW_REG(val, pm_interval);
217 break;
218
219 case pm_start_stop:
220 READ_SHADOW_REG(val, pm_start_stop);
221 break;
222 }
223
224 return val;
225}
226EXPORT_SYMBOL_GPL(cbe_read_pm);
227
228void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
229{
230 switch (reg) {
231 case group_control:
232 WRITE_WO_MMIO(group_control, val);
233 break;
234
235 case debug_bus_control:
236 WRITE_WO_MMIO(debug_bus_control, val);
237 break;
238
239 case trace_address:
240 WRITE_WO_MMIO(trace_address, val);
241 break;
242
243 case ext_tr_timer:
244 WRITE_WO_MMIO(ext_tr_timer, val);
245 break;
246
247 case pm_status:
248 WRITE_WO_MMIO(pm_status, val);
249 break;
250
251 case pm_control:
252 WRITE_WO_MMIO(pm_control, val);
253 break;
254
255 case pm_interval:
256 WRITE_WO_MMIO(pm_interval, val);
257 break;
258
259 case pm_start_stop:
260 WRITE_WO_MMIO(pm_start_stop, val);
261 break;
262 }
263}
264EXPORT_SYMBOL_GPL(cbe_write_pm);
265
266/*
267 * Get/set the size of a physical counter to either 16 or 32 bits.
268 */
269
270u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
271{
272 u32 pm_ctrl, size = 0;
273
274 if (phys_ctr < NR_PHYS_CTRS) {
275 pm_ctrl = cbe_read_pm(cpu, pm_control);
276 size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
277 }
278
279 return size;
280}
281EXPORT_SYMBOL_GPL(cbe_get_ctr_size);
282
283void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
284{
285 u32 pm_ctrl;
286
287 if (phys_ctr < NR_PHYS_CTRS) {
288 pm_ctrl = cbe_read_pm(cpu, pm_control);
289 switch (ctr_size) {
290 case 16:
291 pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
292 break;
293
294 case 32:
295 pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
296 break;
297 }
298 cbe_write_pm(cpu, pm_control, pm_ctrl);
299 }
300}
301EXPORT_SYMBOL_GPL(cbe_set_ctr_size);
302
303/*
304 * Enable/disable the entire performance monitoring unit.
305 * When we enable the PMU, all pending writes to counters get committed.
306 */
307
308void cbe_enable_pm(u32 cpu)
309{
310 struct cbe_pmd_shadow_regs *shadow_regs;
311 u32 pm_ctrl;
312
313 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
314 shadow_regs->counter_value_in_latch = 0;
315
316 pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
317 cbe_write_pm(cpu, pm_control, pm_ctrl);
318}
319EXPORT_SYMBOL_GPL(cbe_enable_pm);
320
321void cbe_disable_pm(u32 cpu)
322{
323 u32 pm_ctrl;
324 pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
325 cbe_write_pm(cpu, pm_control, pm_ctrl);
326}
327EXPORT_SYMBOL_GPL(cbe_disable_pm);
328
329/*
330 * Reading from the trace_buffer.
331 * The trace buffer is two 64-bit registers. Reading from
332 * the second half automatically increments the trace_address.
333 */
334
335void cbe_read_trace_buffer(u32 cpu, u64 *buf)
336{
337 struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);
338
339 *buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
340 *buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
341}
342EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
343
344/*
345 * Enabling/disabling interrupts for the entire performance monitoring unit.
346 */
347
348u32 cbe_query_pm_interrupts(u32 cpu)
349{
350 return cbe_read_pm(cpu, pm_status);
351}
352EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
353
354u32 cbe_clear_pm_interrupts(u32 cpu)
355{
356 /* Reading pm_status clears the interrupt bits. */
357 return cbe_query_pm_interrupts(cpu);
358}
359EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
360
361void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
362{
363 /* Set which node and thread will handle the next interrupt. */
364 iic_set_interrupt_routing(cpu, thread, 0);
365
366 /* Enable the interrupt bits in the pm_status register. */
367 if (mask)
368 cbe_write_pm(cpu, pm_status, mask);
369}
370EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
371
372void cbe_disable_pm_interrupts(u32 cpu)
373{
374 cbe_clear_pm_interrupts(cpu);
375 cbe_write_pm(cpu, pm_status, 0);
376}
377EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
378
379static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
380{
381 perf_irq(get_irq_regs());
382 return IRQ_HANDLED;
383}
384
385static int __init cbe_init_pm_irq(void)
386{
387 unsigned int irq;
388 int rc, node;
389
390 if (!machine_is(cell))
391 return 0;
392
393 for_each_node(node) {
394 irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
395 (node << IIC_IRQ_NODE_SHIFT));
396 if (irq == NO_IRQ) {
397 printk("ERROR: Unable to allocate irq for node %d\n",
398 node);
399 return -EINVAL;
400 }
401
402 rc = request_irq(irq, cbe_pm_irq,
403 IRQF_DISABLED, "cbe-pmu-0", NULL);
404 if (rc) {
405 printk("ERROR: Request for irq on node %d failed\n",
406 node);
407 return rc;
408 }
409 }
410
411 return 0;
412}
413arch_initcall(cbe_init_pm_irq);
414
415void cbe_sync_irq(int node)
416{
417 unsigned int irq;
418
419 irq = irq_find_mapping(NULL,
420 IIC_IRQ_IOEX_PMI
421 | (node << IIC_IRQ_NODE_SHIFT));
422
423 if (irq == NO_IRQ) {
424 printk(KERN_WARNING "ERROR, unable to get existing irq %d " \
425 "for node %d\n", irq, node);
426 return;
427 }
428
429 synchronize_irq(irq);
430}
431EXPORT_SYMBOL_GPL(cbe_sync_irq);
432
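Taken together, the exports above give a simple measurement pattern; a hedged usage sketch (cpu/counter 0 and the initial value are placeholders, and event selection via cbe_write_pm07_control() is omitted):

/* Illustrative use of the PMU API exported above. */
static void example_pmu_count(void)
{
	u32 cpu = 0, ctr = 0, count;

	cbe_disable_pm(cpu);		/* configure with the unit stopped */
	cbe_set_ctr_size(cpu, ctr, 32);	/* one 32-bit physical counter */
	cbe_write_ctr(cpu, ctr, 0);	/* latched until the PMU is enabled */

	cbe_enable_pm(cpu);		/* commits the pending counter write */
	/* ... run the workload being measured ... */
	cbe_disable_pm(cpu);

	count = cbe_read_ctr(cpu, ctr);
	printk(KERN_DEBUG "counter %u: %u events\n", ctr, count);
}
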
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 22c228a49c33..36989c2eee66 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -50,9 +50,10 @@
50#include <asm/spu.h>
51#include <asm/spu_priv1.h>
52#include <asm/udbg.h>
53
54#include "interrupt.h"
55#include "iommu.h"
56#include "cbe_regs.h"
57#include "pervasive.h"
58#include "ras.h"
50#include <asm/spu.h>
51#include <asm/spu_priv1.h>
52#include <asm/udbg.h>
53#include <asm/mpic.h>
54#include <asm/of_platform.h>
55
56#include "interrupt.h"
57#include "cbe_regs.h"
58#include "pervasive.h"
59#include "ras.h"
@@ -80,24 +81,72 @@ static void cell_progress(char *s, unsigned short hex)
80	printk("*** %04x : %s\n", hex, s ? s : "");
81}
82
83static void __init cell_pcibios_fixup(void)
84{
85	struct pci_dev *dev = NULL;
86
87	for_each_pci_dev(dev)
88		pci_read_irq_line(dev);
89}
90
91static void __init cell_init_irq(void)
92{
93	iic_init_IRQ();
94	spider_init_IRQ();
95}
96
97static void __init cell_setup_arch(void)
98{
99#ifdef CONFIG_SPU_BASE
100	spu_priv1_ops = &spu_priv1_mmio_ops;
101#endif
102
103	cbe_regs_init();
81	printk("*** %04x : %s\n", hex, s ? s : "");
82}
83
84static int __init cell_publish_devices(void)
85{
86	if (!machine_is(cell))
87		return 0;
88
89	/* Publish OF platform devices for southbridge IOs */
90	of_platform_bus_probe(NULL, NULL, NULL);
91
92	return 0;
93}
94device_initcall(cell_publish_devices);
95
96static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
97{
98	struct mpic *mpic = desc->handler_data;
99	unsigned int virq;
100
101	virq = mpic_get_one_irq(mpic);
102	if (virq != NO_IRQ)
103		generic_handle_irq(virq);
104	desc->chip->eoi(irq);
105}
106
107static void __init mpic_init_IRQ(void)
108{
109	struct device_node *dn;
110	struct mpic *mpic;
111	unsigned int virq;
112
113	for (dn = NULL;
114	     (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
115		if (!device_is_compatible(dn, "CBEA,platform-open-pic"))
116			continue;
117
118		/* The MPIC driver will get everything it needs from the
119		 * device-tree, just pass 0 to all arguments
120		 */
121		mpic = mpic_alloc(dn, 0, 0, 0, 0, " MPIC ");
122		if (mpic == NULL)
123			continue;
124		mpic_init(mpic);
125
126		virq = irq_of_parse_and_map(dn, 0);
127		if (virq == NO_IRQ)
128			continue;
129
130		printk(KERN_INFO "%s : hooking up to IRQ %d\n",
131			dn->full_name, virq);
132		set_irq_data(virq, mpic);
133		set_irq_chained_handler(virq, cell_mpic_cascade);
134	}
135}
136
137
138static void __init cell_init_irq(void)
139{
140	iic_init_IRQ();
141	spider_init_IRQ();
142	mpic_init_IRQ();
143}
144
145static void __init cell_setup_arch(void)
146{
147#ifdef CONFIG_SPU_BASE
148	spu_priv1_ops = &spu_priv1_mmio_ops;
149	spu_management_ops = &spu_management_of_ops;
150#endif
151
152	cbe_regs_init();
@@ -109,7 +158,6 @@ static void __init cell_setup_arch(void)
109#ifdef CONFIG_SMP
110	smp_init_cell();
111#endif
112
113	/* init to some ~sane value until calibrate_delay() runs */
114	loops_per_jiffy = 50000000;
115
158#ifdef CONFIG_SMP
159	smp_init_cell();
160#endif
161	/* init to some ~sane value until calibrate_delay() runs */
162	loops_per_jiffy = 50000000;
163
@@ -129,19 +177,6 @@ static void __init cell_setup_arch(void)
129	mmio_nvram_init();
130}
131
132/*
133 * Early initialization. Relocation is on but do not reference unbolted pages
134 */
135static void __init cell_init_early(void)
136{
137	DBG(" -> cell_init_early()\n");
138
139	cell_init_iommu();
140
141	DBG(" <- cell_init_early()\n");
142}
143
144
145static int __init cell_probe(void)
146{
147	unsigned long root = of_get_flat_dt_root();
177	mmio_nvram_init();
178}
179
180static int __init cell_probe(void)
181{
182	unsigned long root = of_get_flat_dt_root();
@@ -168,7 +203,6 @@ define_machine(cell) {
168	.name			= "Cell",
169	.probe			= cell_probe,
170	.setup_arch		= cell_setup_arch,
171	.init_early		= cell_init_early,
172	.show_cpuinfo		= cell_show_cpuinfo,
173	.restart		= rtas_restart,
174	.power_off		= rtas_power_off,
203	.name			= "Cell",
204	.probe			= cell_probe,
205	.setup_arch		= cell_setup_arch,
206	.show_cpuinfo		= cell_show_cpuinfo,
207	.restart		= rtas_restart,
208	.power_off		= rtas_power_off,
@@ -180,7 +214,7 @@ define_machine(cell) {
180	.check_legacy_ioport	= cell_check_legacy_ioport,
181	.progress		= cell_progress,
182	.init_IRQ		= cell_init_irq,
183	.pcibios_fixup		= cell_pcibios_fixup,
184#ifdef CONFIG_KEXEC
185	.machine_kexec		= default_machine_kexec,
186	.machine_kexec_prepare	= default_machine_kexec_prepare,
214	.check_legacy_ioport	= cell_check_legacy_ioport,
215	.progress		= cell_progress,
216	.init_IRQ		= cell_init_irq,
217	.pci_setup_phb		= rtas_setup_phb,
218#ifdef CONFIG_KEXEC
219	.machine_kexec		= default_machine_kexec,
220	.machine_kexec_prepare	= default_machine_kexec_prepare,
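
cell_publish_devices() above only publishes the OF platform devices; drivers still bind to them separately. A hedged sketch of such a binding, assuming the of_platform driver API of this era (the compatible string and probe routine are placeholders):

static struct of_device_id example_match[] = {
	{ .compatible = "example,southbridge-io", },
	{},
};

static struct of_platform_driver example_driver = {
	.name		= "example-io",
	.match_table	= example_match,
	.probe		= example_probe,	/* hypothetical probe routine */
};

/* Registered from an initcall:
 *	of_register_platform_driver(&example_driver);
 */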
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 7aa809d5a244..bd7bffc3ddd0 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,22 +25,17 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/poll.h>
30#include <linux/ptrace.h> 28#include <linux/ptrace.h>
31#include <linux/slab.h> 29#include <linux/slab.h>
32#include <linux/wait.h> 30#include <linux/wait.h>
33 31#include <linux/mm.h>
34#include <asm/firmware.h> 32#include <linux/io.h>
35#include <asm/io.h>
36#include <asm/prom.h>
37#include <linux/mutex.h> 33#include <linux/mutex.h>
38#include <asm/spu.h> 34#include <asm/spu.h>
39#include <asm/spu_priv1.h> 35#include <asm/spu_priv1.h>
40#include <asm/mmu_context.h> 36#include <asm/xmon.h>
41
42#include "interrupt.h"
43 37
38const struct spu_management_ops *spu_management_ops;
44const struct spu_priv1_ops *spu_priv1_ops; 39const struct spu_priv1_ops *spu_priv1_ops;
45 40
46EXPORT_SYMBOL_GPL(spu_priv1_ops); 41EXPORT_SYMBOL_GPL(spu_priv1_ops);
@@ -89,7 +84,30 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
89		printk("%s: invalid access during switch!\n", __func__);
90		return 1;
91	}
92	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
84		printk("%s: invalid access during switch!\n", __func__);
85		return 1;
86	}
87	esid = (ea & ESID_MASK) | SLB_ESID_V;
88
89 switch(REGION_ID(ea)) {
90 case USER_REGION_ID:
91#ifdef CONFIG_HUGETLB_PAGE
92 if (in_hugepage_area(mm->context, ea))
93 llp = mmu_psize_defs[mmu_huge_psize].sllp;
94 else
95#endif
96 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
97 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
98 SLB_VSID_USER | llp;
99 break;
100 case VMALLOC_REGION_ID:
101 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
102 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
103 SLB_VSID_KERNEL | llp;
104 break;
105 case KERNEL_REGION_ID:
106 llp = mmu_psize_defs[mmu_linear_psize].sllp;
107 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
108 SLB_VSID_KERNEL | llp;
109 break;
110 default:
93 /* Future: support kernel segments so that drivers 111 /* Future: support kernel segments so that drivers
94 * can use SPUs. 112 * can use SPUs.
95 */ 113 */
@@ -97,16 +115,6 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
97 return 1; 115 return 1;
98 } 116 }
99 117
100 esid = (ea & ESID_MASK) | SLB_ESID_V;
101#ifdef CONFIG_HUGETLB_PAGE
102 if (in_hugepage_area(mm->context, ea))
103 llp = mmu_psize_defs[mmu_huge_psize].sllp;
104 else
105#endif
106 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
107 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
108 SLB_VSID_USER | llp;
109
110 out_be64(&priv2->slb_index_W, spu->slb_replace); 118 out_be64(&priv2->slb_index_W, spu->slb_replace);
111 out_be64(&priv2->slb_vsid_RW, vsid); 119 out_be64(&priv2->slb_vsid_RW, vsid);
112 out_be64(&priv2->slb_esid_RW, esid); 120 out_be64(&priv2->slb_esid_RW, esid);
@@ -320,6 +328,7 @@ static void spu_free_irqs(struct spu *spu)
320} 328}
321 329
322static struct list_head spu_list[MAX_NUMNODES]; 330static struct list_head spu_list[MAX_NUMNODES];
331static LIST_HEAD(spu_full_list);
323static DEFINE_MUTEX(spu_mutex); 332static DEFINE_MUTEX(spu_mutex);
324 333
325static void spu_init_channels(struct spu *spu) 334static void spu_init_channels(struct spu *spu)
@@ -364,8 +373,7 @@ struct spu *spu_alloc_node(int node)
364 if (!list_empty(&spu_list[node])) { 373 if (!list_empty(&spu_list[node])) {
365 spu = list_entry(spu_list[node].next, struct spu, list); 374 spu = list_entry(spu_list[node].next, struct spu, list);
366 list_del_init(&spu->list); 375 list_del_init(&spu->list);
367		pr_debug("Got SPU %x %d %d\n",
368			spu->isrc, spu->number, spu->node);
376		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
369 spu_init_channels(spu); 377 spu_init_channels(spu);
370 } 378 }
371 mutex_unlock(&spu_mutex); 379 mutex_unlock(&spu_mutex);
@@ -493,280 +501,65 @@ int spu_irq_class_1_bottom(struct spu *spu)
493 if (!error) { 501 if (!error) {
494 spu_restart_dma(spu); 502 spu_restart_dma(spu);
495 } else { 503 } else {
496		__spu_trap_invalid_dma(spu);
504		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
497 } 505 }
498 return ret; 506 return ret;
499} 507}
500 508
501static int __init find_spu_node_id(struct device_node *spe)
502{
503	const unsigned int *id;
504	struct device_node *cpu;
505	cpu = spe->parent->parent;
506	id = get_property(cpu, "node-id", NULL);
507	return id ? *id : 0;
508}
509
509struct sysdev_class spu_sysdev_class = {
510	set_kset_name("spu")
511};
510static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
511 const char *prop)
512{
513 static DEFINE_MUTEX(add_spumem_mutex);
514
515 const struct address_prop {
516 unsigned long address;
517 unsigned int len;
518 } __attribute__((packed)) *p;
519 int proplen;
520
521 unsigned long start_pfn, nr_pages;
522 struct pglist_data *pgdata;
523 struct zone *zone;
524 int ret;
525
526 p = get_property(spe, prop, &proplen);
527 WARN_ON(proplen != sizeof (*p));
528
529 start_pfn = p->address >> PAGE_SHIFT;
530 nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
531
532 pgdata = NODE_DATA(spu->nid);
533 zone = pgdata->node_zones;
534
535 /* XXX rethink locking here */
536 mutex_lock(&add_spumem_mutex);
537 ret = __add_pages(zone, start_pfn, nr_pages);
538 mutex_unlock(&add_spumem_mutex);
539
540 return ret;
541}
542
543static void __iomem * __init map_spe_prop(struct spu *spu,
544		struct device_node *n, const char *name)
545{
546	const struct address_prop {
547		unsigned long address;
512
513int spu_add_sysdev_attr(struct sysdev_attribute *attr)
514{
515	struct spu *spu;
516	mutex_lock(&spu_mutex);
548 unsigned int len;
549 } __attribute__((packed)) *prop;
550
551 const void *p;
552 int proplen;
553 void __iomem *ret = NULL;
554 int err = 0;
555
556 p = get_property(n, name, &proplen);
557 if (proplen != sizeof (struct address_prop))
558 return NULL;
559
560 prop = p;
561
562 err = cell_spuprop_present(spu, n, name);
563 if (err && (err != -EEXIST))
564 goto out;
565
566 ret = ioremap(prop->address, prop->len);
567
568 out:
569 return ret;
570}
571 517
572static void spu_unmap(struct spu *spu)
573{
518	list_for_each_entry(spu, &spu_full_list, full_list)
519		sysdev_create_file(&spu->sysdev, attr);
574 iounmap(spu->priv2);
575 iounmap(spu->priv1);
576 iounmap(spu->problem);
577 iounmap((__force u8 __iomem *)spu->local_store);
578}
579 520
580/* This function shall be abstracted for HV platforms */
581static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
521	mutex_unlock(&spu_mutex);
522	return 0;
582{
583 unsigned int isrc;
584 const u32 *tmp;
585
586 /* Get the interrupt source unit from the device-tree */
587 tmp = get_property(np, "isrc", NULL);
588 if (!tmp)
589 return -ENODEV;
590 isrc = tmp[0];
591
592 /* Add the node number */
593 isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
594 spu->isrc = isrc;
595
596 /* Now map interrupts of all 3 classes */
597 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
598 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
599 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
600
601 /* Right now, we only fail if class 2 failed */
602 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
603}
523}
524EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
604 525
605static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
606{
607	const char *prop;
608	int ret;
526int spu_add_sysdev_attr_group(struct attribute_group *attrs)
527{
528	struct spu *spu;
529	mutex_lock(&spu_mutex);
609
610 ret = -ENODEV;
611 spu->name = get_property(node, "name", NULL);
612 if (!spu->name)
613 goto out;
614
615 prop = get_property(node, "local-store", NULL);
616 if (!prop)
617 goto out;
618 spu->local_store_phys = *(unsigned long *)prop;
619
620 /* we use local store as ram, not io memory */
621 spu->local_store = (void __force *)
622 map_spe_prop(spu, node, "local-store");
623 if (!spu->local_store)
624 goto out;
625
626 prop = get_property(node, "problem", NULL);
627 if (!prop)
628 goto out_unmap;
629 spu->problem_phys = *(unsigned long *)prop;
630
631 spu->problem= map_spe_prop(spu, node, "problem");
632 if (!spu->problem)
633 goto out_unmap;
634
635 spu->priv1= map_spe_prop(spu, node, "priv1");
636 /* priv1 is not available on a hypervisor */
637
638 spu->priv2= map_spe_prop(spu, node, "priv2");
639 if (!spu->priv2)
640 goto out_unmap;
641 ret = 0;
642 goto out;
643
644out_unmap:
645 spu_unmap(spu);
646out:
647 return ret;
648}
649 530
650static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
651{
531	list_for_each_entry(spu, &spu_full_list, full_list)
532		sysfs_create_group(&spu->sysdev.kobj, attrs);
652 struct of_irq oirq;
653 int ret;
654 int i;
655
656	for (i = 0; i < 3; i++) {
533
534	mutex_unlock(&spu_mutex);
657 ret = of_irq_map_one(np, i, &oirq);
658 if (ret) {
659 pr_debug("spu_new: failed to get irq %d\n", i);
660 goto err;
661 }
662 ret = -EINVAL;
663 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
664 oirq.controller->full_name);
665 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
666 oirq.specifier, oirq.size);
667 if (spu->irqs[i] == NO_IRQ) {
668 pr_debug("spu_new: failed to map it !\n");
669 goto err;
670 }
671 }
672	return 0;
535	return 0;
673
674err:
675 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
676 for (; i >= 0; i--) {
677 if (spu->irqs[i] != NO_IRQ)
678 irq_dispose_mapping(spu->irqs[i]);
679 }
680 return ret;
681}
536}
537EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
682 538
683static int spu_map_resource(struct device_node *node, int nr,
684 void __iomem** virt, unsigned long *phys)
685{
686 struct resource resource = { };
687 int ret;
688
689 ret = of_address_to_resource(node, nr, &resource);
690 if (ret)
691 goto out;
692 539
693	if (phys)
540void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
694 *phys = resource.start;
695 *virt = ioremap(resource.start, resource.end - resource.start);
696 if (!*virt)
697 ret = -EINVAL;
698
699out:
700 return ret;
701}
702
703static int __init spu_map_device(struct spu *spu, struct device_node *node)
704{
705	int ret = -ENODEV;
706	spu->name = get_property(node, "name", NULL);
541{
542	struct spu *spu;
543	mutex_lock(&spu_mutex);
707 if (!spu->name)
708 goto out;
709
710 ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
711 &spu->local_store_phys);
712 if (ret) {
713 pr_debug("spu_new: failed to map %s resource 0\n",
714 node->full_name);
715 goto out;
716 }
717 ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
718 &spu->problem_phys);
719 if (ret) {
720 pr_debug("spu_new: failed to map %s resource 1\n",
721 node->full_name);
722 goto out_unmap;
723 }
724 ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
725 NULL);
726 if (ret) {
727 pr_debug("spu_new: failed to map %s resource 2\n",
728 node->full_name);
729 goto out_unmap;
730 }
731
732 if (!firmware_has_feature(FW_FEATURE_LPAR))
733 ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
734 NULL);
735 if (ret) {
736 pr_debug("spu_new: failed to map %s resource 3\n",
737 node->full_name);
738 goto out_unmap;
739 }
740 pr_debug("spu_new: %s maps:\n", node->full_name);
741 pr_debug(" local store : 0x%016lx -> 0x%p\n",
742 spu->local_store_phys, spu->local_store);
743 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
744 spu->problem_phys, spu->problem);
745 pr_debug(" priv2 : 0x%p\n", spu->priv2);
746 pr_debug(" priv1 : 0x%p\n", spu->priv1);
747
748	return 0;
749
750out_unmap:
544
545	list_for_each_entry(spu, &spu_full_list, full_list)
546		sysdev_remove_file(&spu->sysdev, attr);
547
548	mutex_unlock(&spu_mutex);
751 spu_unmap(spu);
752out:
753 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
754 return ret;
755}
756
757struct sysdev_class spu_sysdev_class = {
549}
550EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
551
552void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
758 set_kset_name("spu")
759};
760
761static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
762{
763	struct spu *spu = container_of(sysdev, struct spu, sysdev);
764	return sprintf(buf, "%d\n", spu->isrc);
765
766}
767static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);
768
769extern int attach_sysdev_to_node(struct sys_device *dev, int nid);
553{
554	struct spu *spu;
555	mutex_lock(&spu_mutex);
556
557	list_for_each_entry(spu, &spu_full_list, full_list)
558		sysfs_remove_group(&spu->sysdev.kobj, attrs);
559
560	mutex_unlock(&spu_mutex);
561}
562EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
770 563
771static int spu_create_sysdev(struct spu *spu) 564static int spu_create_sysdev(struct spu *spu)
772{ 565{
@@ -781,21 +574,18 @@ static int spu_create_sysdev(struct spu *spu)
781 return ret; 574 return ret;
782 } 575 }
783 576
784	if (spu->isrc != 0)
785		sysdev_create_file(&spu->sysdev, &attr_isrc);
786	sysfs_add_device_to_node(&spu->sysdev, spu->nid);
577	sysfs_add_device_to_node(&spu->sysdev, spu->node);
787 578
788 return 0; 579 return 0;
789} 580}
790 581
791static void spu_destroy_sysdev(struct spu *spu) 582static void spu_destroy_sysdev(struct spu *spu)
792{ 583{
793	sysdev_remove_file(&spu->sysdev, &attr_isrc);
794	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
584	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
795 sysdev_unregister(&spu->sysdev); 585 sysdev_unregister(&spu->sysdev);
796} 586}
797 587
798static int __init create_spu(struct device_node *spe)
588static int __init create_spu(void *data)
799{ 589{
800 struct spu *spu; 590 struct spu *spu;
801 int ret; 591 int ret;
@@ -806,57 +596,37 @@ static int __init create_spu(struct device_node *spe)
806 if (!spu) 596 if (!spu)
807 goto out; 597 goto out;
808 598
809	spu->node = find_spu_node_id(spe);
810	if (spu->node >= MAX_NUMNODES) {
811		printk(KERN_WARNING "SPE %s on node %d ignored,"
812			" node number too big\n", spe->full_name, spu->node);
813		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
814		return -ENODEV;
815	}
816	spu->nid = of_node_to_nid(spe);
817	if (spu->nid == -1)
818		spu->nid = 0;
819
820	ret = spu_map_device(spu, spe);
821	/* try old method */
822	if (ret)
823		ret = spu_map_device_old(spu, spe);
824	if (ret)
825		goto out_free;
826
827	ret = spu_map_interrupts(spu, spe);
828	if (ret)
829		ret = spu_map_interrupts_old(spu, spe);
830	if (ret)
831		goto out_unmap;
832	spin_lock_init(&spu->register_lock);
833	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
834	spu_mfc_sr1_set(spu, 0x33);
835	mutex_lock(&spu_mutex);
836
837	spu->number = number++;
838	ret = spu_request_irqs(spu);
839	if (ret)
840		goto out_unlock;
599	spin_lock_init(&spu->register_lock);
600	mutex_lock(&spu_mutex);
601	spu->number = number++;
602	mutex_unlock(&spu_mutex);
603
604	ret = spu_create_spu(spu, data);
605
606	if (ret)
607		goto out_free;
608
609	spu_mfc_sdr_setup(spu);
610	spu_mfc_sr1_set(spu, 0x33);
611	ret = spu_request_irqs(spu);
612	if (ret)
613		goto out_destroy;
841 614
842 ret = spu_create_sysdev(spu); 615 ret = spu_create_sysdev(spu);
843 if (ret) 616 if (ret)
844 goto out_free_irqs; 617 goto out_free_irqs;
845 618
619 mutex_lock(&spu_mutex);
846 list_add(&spu->list, &spu_list[spu->node]); 620 list_add(&spu->list, &spu_list[spu->node]);
621 list_add(&spu->full_list, &spu_full_list);
847 mutex_unlock(&spu_mutex); 622 mutex_unlock(&spu_mutex);
848 623
849 pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
850 spu->name, spu->isrc, spu->local_store,
851 spu->problem, spu->priv1, spu->priv2, spu->number);
852 goto out; 624 goto out;
853 625
854out_free_irqs: 626out_free_irqs:
855 spu_free_irqs(spu); 627 spu_free_irqs(spu);
856out_unlock:
857	mutex_unlock(&spu_mutex);
858out_unmap:
859	spu_unmap(spu);
628out_destroy:
629	spu_destroy_spu(spu);
860out_free: 630out_free:
861 kfree(spu); 631 kfree(spu);
862out: 632out:
@@ -866,10 +636,11 @@ out:
866static void destroy_spu(struct spu *spu) 636static void destroy_spu(struct spu *spu)
867{ 637{
868 list_del_init(&spu->list); 638 list_del_init(&spu->list);
639 list_del_init(&spu->full_list);
869 640
870 spu_destroy_sysdev(spu); 641 spu_destroy_sysdev(spu);
871 spu_free_irqs(spu); 642 spu_free_irqs(spu);
872	spu_unmap(spu);
643	spu_destroy_spu(spu);
873 kfree(spu); 644 kfree(spu);
874} 645}
875 646
@@ -890,9 +661,11 @@ module_exit(cleanup_spu_base);
890 661
891static int __init init_spu_base(void) 662static int __init init_spu_base(void)
892{ 663{
893 struct device_node *node;
894 int i, ret; 664 int i, ret;
895 665
666 if (!spu_management_ops)
667 return 0;
668
896 /* create sysdev class for spus */ 669 /* create sysdev class for spus */
897 ret = sysdev_class_register(&spu_sysdev_class); 670 ret = sysdev_class_register(&spu_sysdev_class);
898 if (ret) 671 if (ret)
@@ -901,17 +674,17 @@ static int __init init_spu_base(void)
901 for (i = 0; i < MAX_NUMNODES; i++) 674 for (i = 0; i < MAX_NUMNODES; i++)
902 INIT_LIST_HEAD(&spu_list[i]); 675 INIT_LIST_HEAD(&spu_list[i]);
903 676
904	ret = -ENODEV;
905	for (node = of_find_node_by_type(NULL, "spe");
906			node; node = of_find_node_by_type(node, "spe")) {
907		ret = create_spu(node);
908		if (ret) {
909			printk(KERN_WARNING "%s: Error initializing %s\n",
910				__FUNCTION__, node->name);
911			cleanup_spu_base();
912			break;
913		}
914	}
677	ret = spu_enumerate_spus(create_spu);
678
679	if (ret) {
680		printk(KERN_WARNING "%s: Error initializing spus\n",
681			__FUNCTION__);
682		cleanup_spu_base();
683		return ret;
684	}
685
686 xmon_register_spus(&spu_full_list);
687
915 return ret; 688 return ret;
916} 689}
917module_init(init_spu_base); 690module_init(init_spu_base);
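The new spu_add_sysdev_attr() hook replaces per-attribute open coding (such as the removed isrc attribute); a hedged sketch of a caller, with a hypothetical read-only attribute:

static ssize_t example_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%d\n", spu->number);
}
static SYSDEV_ATTR(example, 0444, example_show, NULL);

/* From a module's init, registers the file on every SPU at once:
 *	spu_add_sysdev_attr(&attr_example);
 */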
diff --git a/arch/powerpc/platforms/cell/spu_coredump.c b/arch/powerpc/platforms/cell/spu_coredump.c
new file mode 100644
index 000000000000..6915b418ee73
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_coredump.c
@@ -0,0 +1,81 @@
1/*
2 * SPU core dump code
3 *
4 * (C) Copyright 2006 IBM Corp.
5 *
6 * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/file.h>
24#include <linux/module.h>
25#include <linux/syscalls.h>
26
27#include <asm/spu.h>
28
29static struct spu_coredump_calls spu_coredump_calls;
30static DEFINE_MUTEX(spu_coredump_mutex);
31
32int arch_notes_size(void)
33{
34 long ret;
35 struct module *owner = spu_coredump_calls.owner;
36
37 ret = -ENOSYS;
38 mutex_lock(&spu_coredump_mutex);
39 if (owner && try_module_get(owner)) {
40 ret = spu_coredump_calls.arch_notes_size();
41 module_put(owner);
42 }
43 mutex_unlock(&spu_coredump_mutex);
44 return ret;
45}
46
47void arch_write_notes(struct file *file)
48{
49 struct module *owner = spu_coredump_calls.owner;
50
51 mutex_lock(&spu_coredump_mutex);
52 if (owner && try_module_get(owner)) {
53 spu_coredump_calls.arch_write_notes(file);
54 module_put(owner);
55 }
56 mutex_unlock(&spu_coredump_mutex);
57}
58
59int register_arch_coredump_calls(struct spu_coredump_calls *calls)
60{
61 if (spu_coredump_calls.owner)
62 return -EBUSY;
63
64 mutex_lock(&spu_coredump_mutex);
65 spu_coredump_calls.arch_notes_size = calls->arch_notes_size;
66 spu_coredump_calls.arch_write_notes = calls->arch_write_notes;
67 spu_coredump_calls.owner = calls->owner;
68 mutex_unlock(&spu_coredump_mutex);
69 return 0;
70}
71EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
72
73void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
74{
75 BUG_ON(spu_coredump_calls.owner != calls->owner);
76
77 mutex_lock(&spu_coredump_mutex);
78 spu_coredump_calls.owner = NULL;
79 mutex_unlock(&spu_coredump_mutex);
80}
81EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
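The register/unregister pair above lets spufs supply the note callbacks at module load; a hedged sketch of a caller (both callbacks here are stand-ins, not the real spufs implementations):

static int example_notes_size(void)
{
	return 0;	/* size of the SPU ELF notes to be written */
}

static void example_write_notes(struct file *file)
{
	/* emit the SPU context notes into the core file here */
}

static struct spu_coredump_calls example_calls = {
	.arch_notes_size	= example_notes_size,
	.arch_write_notes	= example_write_notes,
	.owner			= THIS_MODULE,
};

/* From module init/exit:
 *	register_arch_coredump_calls(&example_calls);
 *	unregister_arch_coredump_calls(&example_calls);
 */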
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 71b69f0a1a48..910a926b61a2 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -18,120 +18,509 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/module.h>
22
23#include <asm/io.h>
24#include <asm/spu.h>
25#include <asm/spu_priv1.h>
26
27#include "interrupt.h"
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/interrupt.h>
22#include <linux/list.h>
23#include <linux/module.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/wait.h>
27#include <linux/mm.h>
28#include <linux/io.h>
29#include <linux/mutex.h>
30#include <linux/device.h>
31
32#include <asm/spu.h>
33#include <asm/spu_priv1.h>
34#include <asm/firmware.h>
35#include <asm/prom.h>
36
37#include "interrupt.h"
38#include "spu_priv1_mmio.h"
39
40static DEFINE_MUTEX(add_spumem_mutex);
41
42struct spu_pdata {
43 struct device_node *devnode;
44 struct spu_priv1 __iomem *priv1;
45};
46
47static struct spu_pdata *spu_get_pdata(struct spu *spu)
48{
49 BUG_ON(!spu->pdata);
50 return spu->pdata;
51}
52
53struct device_node *spu_devnode(struct spu *spu)
54{
55 return spu_get_pdata(spu)->devnode;
56}
57
58EXPORT_SYMBOL_GPL(spu_devnode);
59
60static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
61 const char *prop)
62{
63 const struct address_prop {
64 unsigned long address;
65 unsigned int len;
66 } __attribute__((packed)) *p;
67 int proplen;
68
69 unsigned long start_pfn, nr_pages;
70 struct pglist_data *pgdata;
71 struct zone *zone;
72 int ret;
73
74 p = get_property(spe, prop, &proplen);
75 WARN_ON(proplen != sizeof (*p));
76
77 start_pfn = p->address >> PAGE_SHIFT;
78 nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
79
80 pgdata = NODE_DATA(spu->node);
81 zone = pgdata->node_zones;
82
83 /* XXX rethink locking here */
84 mutex_lock(&add_spumem_mutex);
85 ret = __add_pages(zone, start_pfn, nr_pages);
86 mutex_unlock(&add_spumem_mutex);
87
88 return ret;
89}
90
91static void __iomem * __init map_spe_prop(struct spu *spu,
92 struct device_node *n, const char *name)
93{
94 const struct address_prop {
95 unsigned long address;
96 unsigned int len;
97 } __attribute__((packed)) *prop;
98
99 const void *p;
100 int proplen;
101 void __iomem *ret = NULL;
102 int err = 0;
103
104 p = get_property(n, name, &proplen);
105 if (proplen != sizeof (struct address_prop))
106 return NULL;
107
108 prop = p;
109
110 err = cell_spuprop_present(spu, n, name);
111 if (err && (err != -EEXIST))
112 goto out;
113
114 ret = ioremap(prop->address, prop->len);
115
116 out:
117 return ret;
118}
119
120static void spu_unmap(struct spu *spu)
121{
122 iounmap(spu->priv2);
123 iounmap(spu_get_pdata(spu)->priv1);
124 iounmap(spu->problem);
125 iounmap((__force u8 __iomem *)spu->local_store);
126}
127
128static int __init spu_map_interrupts_old(struct spu *spu,
129 struct device_node *np)
130{
131 unsigned int isrc;
132 const u32 *tmp;
133 int nid;
134
135 /* Get the interrupt source unit from the device-tree */
136 tmp = get_property(np, "isrc", NULL);
137 if (!tmp)
138 return -ENODEV;
139 isrc = tmp[0];
140
141 tmp = get_property(np->parent->parent, "node-id", NULL);
142 if (!tmp) {
143 printk(KERN_WARNING "%s: can't find node-id\n", __FUNCTION__);
144 nid = spu->node;
145 } else
146 nid = tmp[0];
147
148 /* Add the node number */
149 isrc |= nid << IIC_IRQ_NODE_SHIFT;
150
151 /* Now map interrupts of all 3 classes */
152 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
153 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
154 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
155
156 /* Right now, we only fail if class 2 failed */
157 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
158}
159
160static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
161{
162 const char *prop;
163 int ret;
164
165 ret = -ENODEV;
166 spu->name = get_property(node, "name", NULL);
167 if (!spu->name)
168 goto out;
169
170 prop = get_property(node, "local-store", NULL);
171 if (!prop)
172 goto out;
173 spu->local_store_phys = *(unsigned long *)prop;
174
175 /* we use local store as ram, not io memory */
176 spu->local_store = (void __force *)
177 map_spe_prop(spu, node, "local-store");
178 if (!spu->local_store)
179 goto out;
180
181 prop = get_property(node, "problem", NULL);
182 if (!prop)
183 goto out_unmap;
184 spu->problem_phys = *(unsigned long *)prop;
185
186	spu->problem = map_spe_prop(spu, node, "problem");
187	if (!spu->problem)
188		goto out_unmap;
189
190	spu_get_pdata(spu)->priv1 = map_spe_prop(spu, node, "priv1");
191
192	spu->priv2 = map_spe_prop(spu, node, "priv2");
193 if (!spu->priv2)
194 goto out_unmap;
195 ret = 0;
196 goto out;
197
198out_unmap:
199 spu_unmap(spu);
200out:
201 return ret;
202}
203
204static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
205{
206 struct of_irq oirq;
207 int ret;
208 int i;
209
210	for (i = 0; i < 3; i++) {
211 ret = of_irq_map_one(np, i, &oirq);
212 if (ret) {
213 pr_debug("spu_new: failed to get irq %d\n", i);
214 goto err;
215 }
216 ret = -EINVAL;
217 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
218 oirq.controller->full_name);
219 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
220 oirq.specifier, oirq.size);
221 if (spu->irqs[i] == NO_IRQ) {
222			pr_debug("spu_new: failed to map it!\n");
223 goto err;
224 }
225 }
226 return 0;
227
228err:
229 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
230 spu->name);
231 for (; i >= 0; i--) {
232 if (spu->irqs[i] != NO_IRQ)
233 irq_dispose_mapping(spu->irqs[i]);
234 }
235 return ret;
236}
237
238static int spu_map_resource(struct spu *spu, int nr,
239 void __iomem** virt, unsigned long *phys)
240{
241 struct device_node *np = spu_get_pdata(spu)->devnode;
242 unsigned long start_pfn, nr_pages;
243 struct pglist_data *pgdata;
244 struct zone *zone;
245 struct resource resource = { };
246 unsigned long len;
247 int ret;
248
249 ret = of_address_to_resource(np, nr, &resource);
250 if (ret)
251 goto out;
252
253 if (phys)
254 *phys = resource.start;
255 len = resource.end - resource.start + 1;
256 *virt = ioremap(resource.start, len);
257 if (!*virt)
258 ret = -EINVAL;
259
260 start_pfn = resource.start >> PAGE_SHIFT;
261 nr_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
262
263 pgdata = NODE_DATA(spu->node);
264 zone = pgdata->node_zones;
265
266 /* XXX rethink locking here */
267 mutex_lock(&add_spumem_mutex);
268 ret = __add_pages(zone, start_pfn, nr_pages);
269 mutex_unlock(&add_spumem_mutex);
270
271out:
272 return ret;
273}
274
275static int __init spu_map_device(struct spu *spu)
276{
277 struct device_node *np = spu_get_pdata(spu)->devnode;
278 int ret = -ENODEV;
279
280 spu->name = get_property(np, "name", NULL);
281 if (!spu->name)
282 goto out;
283
284 ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store,
285 &spu->local_store_phys);
286 if (ret) {
287 pr_debug("spu_new: failed to map %s resource 0\n",
288 np->full_name);
289 goto out;
290 }
291 ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem,
292 &spu->problem_phys);
293 if (ret) {
294 pr_debug("spu_new: failed to map %s resource 1\n",
295 np->full_name);
296 goto out_unmap;
297 }
298 ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL);
299 if (ret) {
300 pr_debug("spu_new: failed to map %s resource 2\n",
301 np->full_name);
302 goto out_unmap;
303 }
304 if (!firmware_has_feature(FW_FEATURE_LPAR))
305 ret = spu_map_resource(spu, 3,
306 (void __iomem**)&spu_get_pdata(spu)->priv1, NULL);
307 if (ret) {
308 pr_debug("spu_new: failed to map %s resource 3\n",
309 np->full_name);
310 goto out_unmap;
311 }
312 pr_debug("spu_new: %s maps:\n", np->full_name);
313 pr_debug(" local store : 0x%016lx -> 0x%p\n",
314 spu->local_store_phys, spu->local_store);
315 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
316 spu->problem_phys, spu->problem);
317 pr_debug(" priv2 : 0x%p\n", spu->priv2);
318 pr_debug(" priv1 : 0x%p\n",
319 spu_get_pdata(spu)->priv1);
320
321 return 0;
322
323out_unmap:
324 spu_unmap(spu);
325out:
326 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
327 return ret;
328}
329
330static int __init of_enumerate_spus(int (*fn)(void *data))
331{
332 int ret;
333 struct device_node *node;
334
335 ret = -ENODEV;
336 for (node = of_find_node_by_type(NULL, "spe");
337 node; node = of_find_node_by_type(node, "spe")) {
338 ret = fn(node);
339 if (ret) {
340 printk(KERN_WARNING "%s: Error initializing %s\n",
341 __FUNCTION__, node->name);
342 break;
343 }
344 }
345 return ret;
346}
347
348static int __init of_create_spu(struct spu *spu, void *data)
349{
350 int ret;
351 struct device_node *spe = (struct device_node *)data;
352
353 spu->pdata = kzalloc(sizeof(struct spu_pdata),
354 GFP_KERNEL);
355 if (!spu->pdata) {
356 ret = -ENOMEM;
357 goto out;
358 }
359 spu_get_pdata(spu)->devnode = of_node_get(spe);
360
361 spu->node = of_node_to_nid(spe);
362 if (spu->node >= MAX_NUMNODES) {
363 printk(KERN_WARNING "SPE %s on node %d ignored,"
364 " node number too big\n", spe->full_name, spu->node);
365 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
366 ret = -ENODEV;
367 goto out_free;
368 }
369
370 ret = spu_map_device(spu);
371 /* try old method */
372 if (ret)
373 ret = spu_map_device_old(spu, spe);
374 if (ret)
375 goto out_free;
376
377 ret = spu_map_interrupts(spu, spe);
378 if (ret)
379 ret = spu_map_interrupts_old(spu, spe);
380 if (ret)
381 goto out_unmap;
382
383	pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
384 spu->local_store, spu->problem, spu_get_pdata(spu)->priv1,
385 spu->priv2, spu->number);
386 goto out;
387
388out_unmap:
389 spu_unmap(spu);
390out_free:
391 kfree(spu->pdata);
392 spu->pdata = NULL;
393out:
394 return ret;
395}
396
397static int of_destroy_spu(struct spu *spu)
398{
399 spu_unmap(spu);
400 of_node_put(spu_get_pdata(spu)->devnode);
401 kfree(spu->pdata);
402 spu->pdata = NULL;
403 return 0;
404}
405
406const struct spu_management_ops spu_management_of_ops = {
407 .enumerate_spus = of_enumerate_spus,
408 .create_spu = of_create_spu,
409 .destroy_spu = of_destroy_spu,
410};
28 411
29static void int_mask_and(struct spu *spu, int class, u64 mask) 412static void int_mask_and(struct spu *spu, int class, u64 mask)
30{ 413{
31 u64 old_mask; 414 u64 old_mask;
32 415
33 old_mask = in_be64(&spu->priv1->int_mask_RW[class]); 416 old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
34 out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask); 417 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
418 old_mask & mask);
35} 419}
36 420
37static void int_mask_or(struct spu *spu, int class, u64 mask) 421static void int_mask_or(struct spu *spu, int class, u64 mask)
38{ 422{
39 u64 old_mask; 423 u64 old_mask;
40 424
41 old_mask = in_be64(&spu->priv1->int_mask_RW[class]); 425 old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
42 out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask); 426 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
427 old_mask | mask);
43} 428}
44 429
45static void int_mask_set(struct spu *spu, int class, u64 mask) 430static void int_mask_set(struct spu *spu, int class, u64 mask)
46{ 431{
47 out_be64(&spu->priv1->int_mask_RW[class], mask); 432 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class], mask);
48} 433}
49 434
50static u64 int_mask_get(struct spu *spu, int class) 435static u64 int_mask_get(struct spu *spu, int class)
51{ 436{
52 return in_be64(&spu->priv1->int_mask_RW[class]); 437 return in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
53} 438}
54 439
55static void int_stat_clear(struct spu *spu, int class, u64 stat) 440static void int_stat_clear(struct spu *spu, int class, u64 stat)
56{ 441{
57 out_be64(&spu->priv1->int_stat_RW[class], stat); 442 out_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class], stat);
58} 443}
59 444
60static u64 int_stat_get(struct spu *spu, int class) 445static u64 int_stat_get(struct spu *spu, int class)
61{ 446{
62 return in_be64(&spu->priv1->int_stat_RW[class]); 447 return in_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class]);
63} 448}
64 449
65static void cpu_affinity_set(struct spu *spu, int cpu) 450static void cpu_affinity_set(struct spu *spu, int cpu)
66{ 451{
67 u64 target = iic_get_target_id(cpu); 452 u64 target = iic_get_target_id(cpu);
68 u64 route = target << 48 | target << 32 | target << 16; 453 u64 route = target << 48 | target << 32 | target << 16;
69 out_be64(&spu->priv1->int_route_RW, route); 454 out_be64(&spu_get_pdata(spu)->priv1->int_route_RW, route);
70} 455}
71 456
72static u64 mfc_dar_get(struct spu *spu) 457static u64 mfc_dar_get(struct spu *spu)
73{ 458{
74 return in_be64(&spu->priv1->mfc_dar_RW); 459 return in_be64(&spu_get_pdata(spu)->priv1->mfc_dar_RW);
75} 460}
76 461
77static u64 mfc_dsisr_get(struct spu *spu) 462static u64 mfc_dsisr_get(struct spu *spu)
78{ 463{
79 return in_be64(&spu->priv1->mfc_dsisr_RW); 464 return in_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW);
80} 465}
81 466
82static void mfc_dsisr_set(struct spu *spu, u64 dsisr) 467static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
83{ 468{
84 out_be64(&spu->priv1->mfc_dsisr_RW, dsisr); 469 out_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW, dsisr);
85} 470}
86 471
87static void mfc_sdr_set(struct spu *spu, u64 sdr) 472static void mfc_sdr_setup(struct spu *spu)
88{ 473{
89 out_be64(&spu->priv1->mfc_sdr_RW, sdr); 474 out_be64(&spu_get_pdata(spu)->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
90} 475}
91 476
92static void mfc_sr1_set(struct spu *spu, u64 sr1) 477static void mfc_sr1_set(struct spu *spu, u64 sr1)
93{ 478{
94 out_be64(&spu->priv1->mfc_sr1_RW, sr1); 479 out_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW, sr1);
95} 480}
96 481
97static u64 mfc_sr1_get(struct spu *spu) 482static u64 mfc_sr1_get(struct spu *spu)
98{ 483{
99 return in_be64(&spu->priv1->mfc_sr1_RW); 484 return in_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW);
100} 485}
101 486
102static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id) 487static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
103{ 488{
104 out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id); 489 out_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW, tclass_id);
105} 490}
106 491
107static u64 mfc_tclass_id_get(struct spu *spu) 492static u64 mfc_tclass_id_get(struct spu *spu)
108{ 493{
109 return in_be64(&spu->priv1->mfc_tclass_id_RW); 494 return in_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW);
110} 495}
111 496
112static void tlb_invalidate(struct spu *spu) 497static void tlb_invalidate(struct spu *spu)
113{ 498{
114 out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul); 499 out_be64(&spu_get_pdata(spu)->priv1->tlb_invalidate_entry_W, 0ul);
115} 500}
116 501
117static void resource_allocation_groupID_set(struct spu *spu, u64 id) 502static void resource_allocation_groupID_set(struct spu *spu, u64 id)
118{ 503{
119 out_be64(&spu->priv1->resource_allocation_groupID_RW, id); 504 out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW,
505 id);
120} 506}
121 507
122static u64 resource_allocation_groupID_get(struct spu *spu) 508static u64 resource_allocation_groupID_get(struct spu *spu)
123{ 509{
124 return in_be64(&spu->priv1->resource_allocation_groupID_RW); 510 return in_be64(
511 &spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW);
125} 512}
126 513
127static void resource_allocation_enable_set(struct spu *spu, u64 enable) 514static void resource_allocation_enable_set(struct spu *spu, u64 enable)
128{ 515{
129 out_be64(&spu->priv1->resource_allocation_enable_RW, enable); 516 out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW,
517 enable);
130} 518}
131 519
132static u64 resource_allocation_enable_get(struct spu *spu) 520static u64 resource_allocation_enable_get(struct spu *spu)
133{ 521{
134 return in_be64(&spu->priv1->resource_allocation_enable_RW); 522 return in_be64(
523 &spu_get_pdata(spu)->priv1->resource_allocation_enable_RW);
135} 524}
136 525
137const struct spu_priv1_ops spu_priv1_mmio_ops = 526const struct spu_priv1_ops spu_priv1_mmio_ops =
@@ -146,7 +535,7 @@ const struct spu_priv1_ops spu_priv1_mmio_ops =
146 .mfc_dar_get = mfc_dar_get, 535 .mfc_dar_get = mfc_dar_get,
147 .mfc_dsisr_get = mfc_dsisr_get, 536 .mfc_dsisr_get = mfc_dsisr_get,
148 .mfc_dsisr_set = mfc_dsisr_set, 537 .mfc_dsisr_set = mfc_dsisr_set,
149 .mfc_sdr_set = mfc_sdr_set, 538 .mfc_sdr_setup = mfc_sdr_setup,
150 .mfc_sr1_set = mfc_sr1_set, 539 .mfc_sr1_set = mfc_sr1_set,
151 .mfc_sr1_get = mfc_sr1_get, 540 .mfc_sr1_get = mfc_sr1_get,
152 .mfc_tclass_id_set = mfc_tclass_id_set, 541 .mfc_tclass_id_set = mfc_tclass_id_set,
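
Every accessor in this file now reaches the priv1 register block through spu_get_pdata(spu)->priv1 rather than a field of struct spu itself, so only the MMIO backend knows that mapping exists at all. Generic code goes through the spu_priv1_ops table instead; the wrappers in asm/spu_priv1.h look roughly like this (a sketch from memory of that header, not part of this diff):

	extern const struct spu_priv1_ops *spu_priv1_ops;

	static inline void spu_int_mask_set(struct spu *spu, int class, u64 mask)
	{
		spu_priv1_ops->int_mask_set(spu, class, mask);
	}

	static inline u64 spu_int_mask_get(struct spu *spu, int class)
	{
		return spu_priv1_ops->int_mask_get(spu, class);
	}

A hypervisor-mediated platform can install a different ops table without touching any caller, which is the point of the indirection.
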
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
new file mode 100644
index 000000000000..7b62bd1cc256
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
@@ -0,0 +1,26 @@
1/*
2 * spu hypervisor abstraction for direct hardware access.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef SPU_PRIV1_MMIO_H
22#define SPU_PRIV1_MMIO_H
23
24struct device_node *spu_devnode(struct spu *spu);
25
26#endif /* SPU_PRIV1_MMIO_H */
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index ecdfbb35f82e..472217d19faf 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,7 +1,7 @@
1obj-y += switch.o 1obj-y += switch.o
2 2
3obj-$(CONFIG_SPU_FS) += spufs.o 3obj-$(CONFIG_SPU_FS) += spufs.o
4spufs-y += inode.o file.o context.o syscalls.o 4spufs-y += inode.o file.o context.o syscalls.o coredump.o
5spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o 5spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
6 6
7# Rules to build switch.o with the help of SPU tool chain 7# Rules to build switch.o with the help of SPU tool chain
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 2d22cd59d6fc..1898f0d3a8b8 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -36,6 +36,7 @@
36#include <asm/io.h> 36#include <asm/io.h>
37#include <asm/spu.h> 37#include <asm/spu.h>
38#include <asm/spu_csa.h> 38#include <asm/spu_csa.h>
39#include <asm/spu_info.h>
39#include <asm/mmu_context.h> 40#include <asm/mmu_context.h>
40#include "spufs.h" 41#include "spufs.h"
41 42
@@ -267,6 +268,11 @@ static char *spu_backing_get_ls(struct spu_context *ctx)
267 return ctx->csa.lscsa->ls; 268 return ctx->csa.lscsa->ls;
268} 269}
269 270
271static u32 spu_backing_runcntl_read(struct spu_context *ctx)
272{
273 return ctx->csa.prob.spu_runcntl_RW;
274}
275
270static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val) 276static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
271{ 277{
272 spin_lock(&ctx->csa.register_lock); 278 spin_lock(&ctx->csa.register_lock);
@@ -279,9 +285,26 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
279 spin_unlock(&ctx->csa.register_lock); 285 spin_unlock(&ctx->csa.register_lock);
280} 286}
281 287
282static void spu_backing_runcntl_stop(struct spu_context *ctx) 288static void spu_backing_master_start(struct spu_context *ctx)
289{
290 struct spu_state *csa = &ctx->csa;
291 u64 sr1;
292
293 spin_lock(&csa->register_lock);
294 sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
295 csa->priv1.mfc_sr1_RW = sr1;
296 spin_unlock(&csa->register_lock);
297}
298
299static void spu_backing_master_stop(struct spu_context *ctx)
283{ 300{
284 spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP); 301 struct spu_state *csa = &ctx->csa;
302 u64 sr1;
303
304 spin_lock(&csa->register_lock);
305 sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
306 csa->priv1.mfc_sr1_RW = sr1;
307 spin_unlock(&csa->register_lock);
285} 308}
286 309
287static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask, 310static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
@@ -345,8 +368,10 @@ struct spu_context_ops spu_backing_ops = {
345 .npc_write = spu_backing_npc_write, 368 .npc_write = spu_backing_npc_write,
346 .status_read = spu_backing_status_read, 369 .status_read = spu_backing_status_read,
347 .get_ls = spu_backing_get_ls, 370 .get_ls = spu_backing_get_ls,
371 .runcntl_read = spu_backing_runcntl_read,
348 .runcntl_write = spu_backing_runcntl_write, 372 .runcntl_write = spu_backing_runcntl_write,
349 .runcntl_stop = spu_backing_runcntl_stop, 373 .master_start = spu_backing_master_start,
374 .master_stop = spu_backing_master_stop,
350 .set_mfc_query = spu_backing_set_mfc_query, 375 .set_mfc_query = spu_backing_set_mfc_query,
351 .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus, 376 .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
352 .get_mfc_free_elements = spu_backing_get_mfc_free_elements, 377 .get_mfc_free_elements = spu_backing_get_mfc_free_elements,
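
The master_start/master_stop pair replaces the old runcntl_stop: instead of writing SPU_RUNCNTL_STOP to the run-control image, they flip the master run-control bit in the saved MFC SR1 image, which gates the SPU independently of the problem-state run control. For a context loaded on hardware, the hw_ops counterpart (not shown in this excerpt) would plausibly perform the same read-modify-write against the live register through the priv1 accessors, along these lines (locking around the RMW elided in this sketch):

	static void spu_hw_master_start(struct spu_context *ctx)
	{
		struct spu *spu = ctx->spu;
		u64 sr1;

		/* same bit as the backing variant, but on live hardware */
		sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, sr1);
	}
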
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 034cf6af53a2..0870009f56db 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -120,6 +120,33 @@ void spu_unmap_mappings(struct spu_context *ctx)
120 unmap_mapping_range(ctx->signal2, 0, 0x4000, 1); 120 unmap_mapping_range(ctx->signal2, 0, 0x4000, 1);
121} 121}
122 122
123int spu_acquire_exclusive(struct spu_context *ctx)
124{
125 int ret = 0;
126
127 down_write(&ctx->state_sema);
128 /* ctx is about to be freed, can't acquire any more */
129 if (!ctx->owner) {
130 ret = -EINVAL;
131 goto out;
132 }
133
134 if (ctx->state == SPU_STATE_SAVED) {
135 ret = spu_activate(ctx, 0);
136 if (ret)
137 goto out;
138 ctx->state = SPU_STATE_RUNNABLE;
139 } else {
140 /* We need to exclude userspace access to the context. */
141 spu_unmap_mappings(ctx);
142 }
143
144out:
145 if (ret)
146 up_write(&ctx->state_sema);
147 return ret;
148}
149
123int spu_acquire_runnable(struct spu_context *ctx) 150int spu_acquire_runnable(struct spu_context *ctx)
124{ 151{
125 int ret = 0; 152 int ret = 0;
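
spu_acquire_exclusive() takes ctx->state_sema for writing, activates the context if it was saved, and otherwise shoots down userspace mappings so nothing can touch the problem state concurrently. The locking contract is asymmetric: on failure the semaphore has already been dropped, on success the caller holds it and must release it. A hedged usage sketch (this excerpt shows no dedicated release helper, so the write lock is dropped directly):

	int poke_context(struct spu_context *ctx)
	{
		int ret;

		ret = spu_acquire_exclusive(ctx);
		if (ret)
			return ret;	/* state_sema already released */

		/* ... exclusive access; userspace mappings are gone ... */

		up_write(&ctx->state_sema);
		return 0;
	}
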
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
new file mode 100644
index 000000000000..725e19561159
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -0,0 +1,238 @@
1/*
2 * SPU core dump code
3 *
4 * (C) Copyright 2006 IBM Corp.
5 *
6 * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/elf.h>
24#include <linux/file.h>
25#include <linux/fs.h>
26#include <linux/list.h>
27#include <linux/module.h>
28#include <linux/syscalls.h>
29
30#include <asm/uaccess.h>
31
32#include "spufs.h"
33
34struct spufs_ctx_info {
35 struct list_head list;
36 int dfd;
37 int memsize; /* in bytes */
38 struct spu_context *ctx;
39};
40
41static LIST_HEAD(ctx_info_list);
42
43static ssize_t do_coredump_read(int num, struct spu_context *ctx, void __user *buffer,
44 size_t size, loff_t *off)
45{
46 u64 data;
47 int ret;
48
49 if (spufs_coredump_read[num].read)
50 return spufs_coredump_read[num].read(ctx, buffer, size, off);
51
52 data = spufs_coredump_read[num].get(ctx);
53 ret = copy_to_user(buffer, &data, 8);
54 return ret ? -EFAULT : 8;
55}
56
57/*
58 * These are the only things you should do on a core-file: use only these
59 * functions to write out all the necessary info.
60 */
61static int spufs_dump_write(struct file *file, const void *addr, int nr)
62{
63 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
64}
65
66static int spufs_dump_seek(struct file *file, loff_t off)
67{
68 if (file->f_op->llseek) {
69 if (file->f_op->llseek(file, off, 0) != off)
70 return 0;
71 } else
72 file->f_pos = off;
73 return 1;
74}
75
76static void spufs_fill_memsize(struct spufs_ctx_info *ctx_info)
77{
78 struct spu_context *ctx;
79 unsigned long long lslr;
80
81 ctx = ctx_info->ctx;
82 lslr = ctx->csa.priv2.spu_lslr_RW;
83 ctx_info->memsize = lslr + 1;
84}
85
86static int spufs_ctx_note_size(struct spufs_ctx_info *ctx_info)
87{
88 int dfd, memsize, i, sz, total = 0;
89 char *name;
90 char fullname[80];
91
92 dfd = ctx_info->dfd;
93 memsize = ctx_info->memsize;
94
95 for (i = 0; spufs_coredump_read[i].name; i++) {
96 name = spufs_coredump_read[i].name;
97 sz = spufs_coredump_read[i].size;
98
99 sprintf(fullname, "SPU/%d/%s", dfd, name);
100
101 total += sizeof(struct elf_note);
102 total += roundup(strlen(fullname) + 1, 4);
103 if (!strcmp(name, "mem"))
104 total += roundup(memsize, 4);
105 else
106 total += roundup(sz, 4);
107 }
108
109 return total;
110}
111
112static int spufs_add_one_context(struct file *file, int dfd)
113{
114 struct spu_context *ctx;
115 struct spufs_ctx_info *ctx_info;
116 int size;
117
118 ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
119 if (ctx->flags & SPU_CREATE_NOSCHED)
120 return 0;
121
122 ctx_info = kzalloc(sizeof(*ctx_info), GFP_KERNEL);
123 if (unlikely(!ctx_info))
124 return -ENOMEM;
125
126 ctx_info->dfd = dfd;
127 ctx_info->ctx = ctx;
128
129 spufs_fill_memsize(ctx_info);
130
131 size = spufs_ctx_note_size(ctx_info);
132 list_add(&ctx_info->list, &ctx_info_list);
133 return size;
134}
135
136/*
137 * The additional architecture-specific notes for Cell are various
138 * context files in the spu context.
139 *
140 * This function iterates over all open file descriptors and sees
141 * if they are a directory in spufs. In that case we use spufs
142 * internal functionality to dump them without needing to actually
143 * open the files.
144 */
145static int spufs_arch_notes_size(void)
146{
147 struct fdtable *fdt = files_fdtable(current->files);
148 int size = 0, fd;
149
150 for (fd = 0; fd < fdt->max_fds; fd++) {
151 if (FD_ISSET(fd, fdt->open_fds)) {
152 struct file *file = fcheck(fd);
153
154 if (file && file->f_op == &spufs_context_fops) {
155 int rval = spufs_add_one_context(file, fd);
156 if (rval < 0)
157 break;
158 size += rval;
159 }
160 }
161 }
162
163 return size;
164}
165
166static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
167 struct file *file)
168{
169 struct spu_context *ctx;
170 loff_t pos = 0;
171 int sz, dfd, rc, total = 0;
172 const int bufsz = 4096;
173 char *name;
174 char fullname[80], *buf;
175 struct elf_note en;
176
177 buf = kmalloc(bufsz, GFP_KERNEL);
178 if (!buf)
179 return;
180
181 dfd = ctx_info->dfd;
182 name = spufs_coredump_read[i].name;
183
184 if (!strcmp(name, "mem"))
185 sz = ctx_info->memsize;
186 else
187 sz = spufs_coredump_read[i].size;
188
189 ctx = ctx_info->ctx;
190 if (!ctx) {
191 return;
192 }
193
194 sprintf(fullname, "SPU/%d/%s", dfd, name);
195 en.n_namesz = strlen(fullname) + 1;
196 en.n_descsz = sz;
197 en.n_type = NT_SPU;
198
199 if (!spufs_dump_write(file, &en, sizeof(en)))
200 return;
201 if (!spufs_dump_write(file, fullname, en.n_namesz))
202 return;
203 if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4)))
204 return;
205
206 do {
207 rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
208 if (rc > 0) {
209 if (!spufs_dump_write(file, buf, rc))
210 return;
211 total += rc;
212 }
213 } while (rc == bufsz && total < sz);
214
215 spufs_dump_seek(file, roundup((unsigned long)file->f_pos
216 - total + sz, 4));
217}
218
219static void spufs_arch_write_notes(struct file *file)
220{
221 int j;
222 struct spufs_ctx_info *ctx_info, *next;
223
224 list_for_each_entry_safe(ctx_info, next, &ctx_info_list, list) {
225 spu_acquire_saved(ctx_info->ctx);
226 for (j = 0; j < spufs_coredump_num_notes; j++)
227 spufs_arch_write_note(ctx_info, j, file);
228 spu_release(ctx_info->ctx);
229 list_del(&ctx_info->list);
230 kfree(ctx_info);
231 }
232}
233
234struct spu_coredump_calls spufs_coredump_calls = {
235 .arch_notes_size = spufs_arch_notes_size,
236 .arch_write_notes = spufs_arch_write_notes,
237 .owner = THIS_MODULE,
238};
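
Each dumped context contributes one ELF note per entry in spufs_coredump_read[] (the table appears at the end of file.c below), and spufs_ctx_note_size() accounts for the standard note layout: a 12-byte header, the NUL-terminated name padded to a 4-byte boundary, then the descriptor likewise padded. A worked example for a context on file descriptor 3, using the 11-byte "decr" entry from that table (arithmetic only, not kernel code):

	/* note named "SPU/3/decr", descriptor size 11 bytes */
	size_t total = sizeof(struct elf_note)		      /* 12 */
		     + roundup(strlen("SPU/3/decr") + 1, 4)   /* 11 -> 12 */
		     + roundup(11, 4);			      /* 11 -> 12 */
	/* total: 36 bytes for this one note */

As an aside, spufs_arch_write_note() above kmallocs a 4 KB bounce buffer that no path frees; both the early returns and the normal exit leak it.
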
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 533e2723e184..347eff56fcbd 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -32,13 +32,13 @@
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/semaphore.h> 33#include <asm/semaphore.h>
34#include <asm/spu.h> 34#include <asm/spu.h>
35#include <asm/spu_info.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36 37
37#include "spufs.h" 38#include "spufs.h"
38 39
39#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000) 40#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
40 41
41
42static int 42static int
43spufs_mem_open(struct inode *inode, struct file *file) 43spufs_mem_open(struct inode *inode, struct file *file)
44{ 44{
@@ -51,18 +51,23 @@ spufs_mem_open(struct inode *inode, struct file *file)
51} 51}
52 52
53static ssize_t 53static ssize_t
54__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
55 size_t size, loff_t *pos)
56{
57 char *local_store = ctx->ops->get_ls(ctx);
58 return simple_read_from_buffer(buffer, size, pos, local_store,
59 LS_SIZE);
60}
61
62static ssize_t
54spufs_mem_read(struct file *file, char __user *buffer, 63spufs_mem_read(struct file *file, char __user *buffer,
55 size_t size, loff_t *pos) 64 size_t size, loff_t *pos)
56{ 65{
57 struct spu_context *ctx = file->private_data;
58 char *local_store;
59 int ret; 66 int ret;
67 struct spu_context *ctx = file->private_data;
60 68
61 spu_acquire(ctx); 69 spu_acquire(ctx);
62 70 ret = __spufs_mem_read(ctx, buffer, size, pos);
63 local_store = ctx->ops->get_ls(ctx);
64 ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);
65
66 spu_release(ctx); 71 spu_release(ctx);
67 return ret; 72 return ret;
68} 73}
@@ -104,11 +109,11 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
104 109
105 if (ctx->state == SPU_STATE_SAVED) { 110 if (ctx->state == SPU_STATE_SAVED) {
106 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 111 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
107 & ~(_PAGE_NO_CACHE | _PAGE_GUARDED)); 112 & ~_PAGE_NO_CACHE);
108 page = vmalloc_to_page(ctx->csa.lscsa->ls + offset); 113 page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
109 } else { 114 } else {
110 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 115 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
111 | _PAGE_NO_CACHE | _PAGE_GUARDED); 116 | _PAGE_NO_CACHE);
112 page = pfn_to_page((ctx->spu->local_store_phys + offset) 117 page = pfn_to_page((ctx->spu->local_store_phys + offset)
113 >> PAGE_SHIFT); 118 >> PAGE_SHIFT);
114 } 119 }
@@ -131,7 +136,7 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
131 if (!(vma->vm_flags & VM_SHARED)) 136 if (!(vma->vm_flags & VM_SHARED))
132 return -EINVAL; 137 return -EINVAL;
133 138
134 /* FIXME: */ 139 vma->vm_flags |= VM_IO;
135 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 140 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
136 | _PAGE_NO_CACHE); 141 | _PAGE_NO_CACHE);
137 142
@@ -200,7 +205,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
200 if (!(vma->vm_flags & VM_SHARED)) 205 if (!(vma->vm_flags & VM_SHARED))
201 return -EINVAL; 206 return -EINVAL;
202 207
203 vma->vm_flags |= VM_RESERVED; 208 vma->vm_flags |= VM_IO;
204 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 209 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
205 | _PAGE_NO_CACHE | _PAGE_GUARDED); 210 | _PAGE_NO_CACHE | _PAGE_GUARDED);
206 211
@@ -261,18 +266,23 @@ spufs_regs_open(struct inode *inode, struct file *file)
261} 266}
262 267
263static ssize_t 268static ssize_t
269__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
270 size_t size, loff_t *pos)
271{
272 struct spu_lscsa *lscsa = ctx->csa.lscsa;
273 return simple_read_from_buffer(buffer, size, pos,
274 lscsa->gprs, sizeof lscsa->gprs);
275}
276
277static ssize_t
264spufs_regs_read(struct file *file, char __user *buffer, 278spufs_regs_read(struct file *file, char __user *buffer,
265 size_t size, loff_t *pos) 279 size_t size, loff_t *pos)
266{ 280{
267 struct spu_context *ctx = file->private_data;
268 struct spu_lscsa *lscsa = ctx->csa.lscsa;
269 int ret; 281 int ret;
282 struct spu_context *ctx = file->private_data;
270 283
271 spu_acquire_saved(ctx); 284 spu_acquire_saved(ctx);
272 285 ret = __spufs_regs_read(ctx, buffer, size, pos);
273 ret = simple_read_from_buffer(buffer, size, pos,
274 lscsa->gprs, sizeof lscsa->gprs);
275
276 spu_release(ctx); 286 spu_release(ctx);
277 return ret; 287 return ret;
278} 288}
@@ -307,18 +317,23 @@ static struct file_operations spufs_regs_fops = {
307}; 317};
308 318
309static ssize_t 319static ssize_t
320__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
321 size_t size, loff_t * pos)
322{
323 struct spu_lscsa *lscsa = ctx->csa.lscsa;
324 return simple_read_from_buffer(buffer, size, pos,
325 &lscsa->fpcr, sizeof(lscsa->fpcr));
326}
327
328static ssize_t
310spufs_fpcr_read(struct file *file, char __user * buffer, 329spufs_fpcr_read(struct file *file, char __user * buffer,
311 size_t size, loff_t * pos) 330 size_t size, loff_t * pos)
312{ 331{
313 struct spu_context *ctx = file->private_data;
314 struct spu_lscsa *lscsa = ctx->csa.lscsa;
315 int ret; 332 int ret;
333 struct spu_context *ctx = file->private_data;
316 334
317 spu_acquire_saved(ctx); 335 spu_acquire_saved(ctx);
318 336 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
319 ret = simple_read_from_buffer(buffer, size, pos,
320 &lscsa->fpcr, sizeof(lscsa->fpcr));
321
322 spu_release(ctx); 337 spu_release(ctx);
323 return ret; 338 return ret;
324} 339}
@@ -718,23 +733,41 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
718 return nonseekable_open(inode, file); 733 return nonseekable_open(inode, file);
719} 734}
720 735
721static ssize_t spufs_signal1_read(struct file *file, char __user *buf, 736static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
722 size_t len, loff_t *pos) 737 size_t len, loff_t *pos)
723{ 738{
724 struct spu_context *ctx = file->private_data; 739 int ret = 0;
725 u32 data; 740 u32 data;
726 741
727 if (len < 4) 742 if (len < 4)
728 return -EINVAL; 743 return -EINVAL;
729 744
730 spu_acquire(ctx); 745 if (ctx->csa.spu_chnlcnt_RW[3]) {
731 data = ctx->ops->signal1_read(ctx); 746 data = ctx->csa.spu_chnldata_RW[3];
732 spu_release(ctx); 747 ret = 4;
748 }
749
750 if (!ret)
751 goto out;
733 752
734 if (copy_to_user(buf, &data, 4)) 753 if (copy_to_user(buf, &data, 4))
735 return -EFAULT; 754 return -EFAULT;
736 755
737 return 4; 756out:
757 return ret;
758}
759
760static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
761 size_t len, loff_t *pos)
762{
763 int ret;
764 struct spu_context *ctx = file->private_data;
765
766 spu_acquire_saved(ctx);
767 ret = __spufs_signal1_read(ctx, buf, len, pos);
768 spu_release(ctx);
769
770 return ret;
738} 771}
739 772
740static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, 773static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
@@ -782,7 +815,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
782 if (!(vma->vm_flags & VM_SHARED)) 815 if (!(vma->vm_flags & VM_SHARED))
783 return -EINVAL; 816 return -EINVAL;
784 817
785 vma->vm_flags |= VM_RESERVED; 818 vma->vm_flags |= VM_IO;
786 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 819 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
787 | _PAGE_NO_CACHE | _PAGE_GUARDED); 820 | _PAGE_NO_CACHE | _PAGE_GUARDED);
788 821
@@ -807,25 +840,41 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
807 return nonseekable_open(inode, file); 840 return nonseekable_open(inode, file);
808} 841}
809 842
810static ssize_t spufs_signal2_read(struct file *file, char __user *buf, 843static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
811 size_t len, loff_t *pos) 844 size_t len, loff_t *pos)
812{ 845{
813 struct spu_context *ctx; 846 int ret = 0;
814 u32 data; 847 u32 data;
815 848
816 ctx = file->private_data;
817
818 if (len < 4) 849 if (len < 4)
819 return -EINVAL; 850 return -EINVAL;
820 851
821 spu_acquire(ctx); 852 if (ctx->csa.spu_chnlcnt_RW[4]) {
822 data = ctx->ops->signal2_read(ctx); 853 data = ctx->csa.spu_chnldata_RW[4];
823 spu_release(ctx); 854 ret = 4;
855 }
856
857 if (!ret)
858 goto out;
824 859
825 if (copy_to_user(buf, &data, 4)) 860 if (copy_to_user(buf, &data, 4))
826 return -EFAULT; 861 return -EFAULT;
827 862
828 return 4; 863out:
864 return ret;
865}
866
867static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
868 size_t len, loff_t *pos)
869{
870 struct spu_context *ctx = file->private_data;
871 int ret;
872
873 spu_acquire_saved(ctx);
874 ret = __spufs_signal2_read(ctx, buf, len, pos);
875 spu_release(ctx);
876
877 return ret;
829} 878}
830 879
831static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, 880static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
@@ -874,8 +923,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
874 if (!(vma->vm_flags & VM_SHARED)) 923 if (!(vma->vm_flags & VM_SHARED))
875 return -EINVAL; 924 return -EINVAL;
876 925
877 /* FIXME: */ 926 vma->vm_flags |= VM_IO;
878 vma->vm_flags |= VM_RESERVED;
879 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 927 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
880 | _PAGE_NO_CACHE | _PAGE_GUARDED); 928 | _PAGE_NO_CACHE | _PAGE_GUARDED);
881 929
@@ -902,13 +950,19 @@ static void spufs_signal1_type_set(void *data, u64 val)
902 spu_release(ctx); 950 spu_release(ctx);
903} 951}
904 952
953static u64 __spufs_signal1_type_get(void *data)
954{
955 struct spu_context *ctx = data;
956 return ctx->ops->signal1_type_get(ctx);
957}
958
905static u64 spufs_signal1_type_get(void *data) 959static u64 spufs_signal1_type_get(void *data)
906{ 960{
907 struct spu_context *ctx = data; 961 struct spu_context *ctx = data;
908 u64 ret; 962 u64 ret;
909 963
910 spu_acquire(ctx); 964 spu_acquire(ctx);
911 ret = ctx->ops->signal1_type_get(ctx); 965 ret = __spufs_signal1_type_get(data);
912 spu_release(ctx); 966 spu_release(ctx);
913 967
914 return ret; 968 return ret;
@@ -925,13 +979,19 @@ static void spufs_signal2_type_set(void *data, u64 val)
925 spu_release(ctx); 979 spu_release(ctx);
926} 980}
927 981
982static u64 __spufs_signal2_type_get(void *data)
983{
984 struct spu_context *ctx = data;
985 return ctx->ops->signal2_type_get(ctx);
986}
987
928static u64 spufs_signal2_type_get(void *data) 988static u64 spufs_signal2_type_get(void *data)
929{ 989{
930 struct spu_context *ctx = data; 990 struct spu_context *ctx = data;
931 u64 ret; 991 u64 ret;
932 992
933 spu_acquire(ctx); 993 spu_acquire(ctx);
934 ret = ctx->ops->signal2_type_get(ctx); 994 ret = __spufs_signal2_type_get(data);
935 spu_release(ctx); 995 spu_release(ctx);
936 996
937 return ret; 997 return ret;
@@ -958,7 +1018,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
958 if (!(vma->vm_flags & VM_SHARED)) 1018 if (!(vma->vm_flags & VM_SHARED))
959 return -EINVAL; 1019 return -EINVAL;
960 1020
961 vma->vm_flags |= VM_RESERVED; 1021 vma->vm_flags |= VM_IO;
962 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1022 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
963 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1023 | _PAGE_NO_CACHE | _PAGE_GUARDED);
964 1024
@@ -1000,7 +1060,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1000 if (!(vma->vm_flags & VM_SHARED)) 1060 if (!(vma->vm_flags & VM_SHARED))
1001 return -EINVAL; 1061 return -EINVAL;
1002 1062
1003 vma->vm_flags |= VM_RESERVED; 1063 vma->vm_flags |= VM_IO;
1004 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1064 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1005 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1065 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1006 1066
@@ -1041,7 +1101,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1041 if (!(vma->vm_flags & VM_SHARED)) 1101 if (!(vma->vm_flags & VM_SHARED))
1042 return -EINVAL; 1102 return -EINVAL;
1043 1103
1044 vma->vm_flags |= VM_RESERVED; 1104 vma->vm_flags |= VM_IO;
1045 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1105 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1046 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1106 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1047 1107
@@ -1265,6 +1325,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1265 goto out; 1325 goto out;
1266 1326
1267 ctx->tagwait |= 1 << cmd.tag; 1327 ctx->tagwait |= 1 << cmd.tag;
1328 ret = size;
1268 1329
1269out: 1330out:
1270 return ret; 1331 return ret;
@@ -1360,7 +1421,8 @@ static u64 spufs_npc_get(void *data)
1360 spu_release(ctx); 1421 spu_release(ctx);
1361 return ret; 1422 return ret;
1362} 1423}
1363DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n") 1424DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1425 "0x%llx\n")
1364 1426
1365static void spufs_decr_set(void *data, u64 val) 1427static void spufs_decr_set(void *data, u64 val)
1366{ 1428{
@@ -1371,18 +1433,24 @@ static void spufs_decr_set(void *data, u64 val)
1371 spu_release(ctx); 1433 spu_release(ctx);
1372} 1434}
1373 1435
1374static u64 spufs_decr_get(void *data) 1436static u64 __spufs_decr_get(void *data)
1375{ 1437{
1376 struct spu_context *ctx = data; 1438 struct spu_context *ctx = data;
1377 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1439 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1440 return lscsa->decr.slot[0];
1441}
1442
1443static u64 spufs_decr_get(void *data)
1444{
1445 struct spu_context *ctx = data;
1378 u64 ret; 1446 u64 ret;
1379 spu_acquire_saved(ctx); 1447 spu_acquire_saved(ctx);
1380 ret = lscsa->decr.slot[0]; 1448 ret = __spufs_decr_get(data);
1381 spu_release(ctx); 1449 spu_release(ctx);
1382 return ret; 1450 return ret;
1383} 1451}
1384DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, 1452DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1385 "%llx\n") 1453 "0x%llx\n")
1386 1454
1387static void spufs_decr_status_set(void *data, u64 val) 1455static void spufs_decr_status_set(void *data, u64 val)
1388{ 1456{
@@ -1393,62 +1461,76 @@ static void spufs_decr_status_set(void *data, u64 val)
1393 spu_release(ctx); 1461 spu_release(ctx);
1394} 1462}
1395 1463
1396static u64 spufs_decr_status_get(void *data) 1464static u64 __spufs_decr_status_get(void *data)
1397{ 1465{
1398 struct spu_context *ctx = data; 1466 struct spu_context *ctx = data;
1399 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1467 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1468 return lscsa->decr_status.slot[0];
1469}
1470
1471static u64 spufs_decr_status_get(void *data)
1472{
1473 struct spu_context *ctx = data;
1400 u64 ret; 1474 u64 ret;
1401 spu_acquire_saved(ctx); 1475 spu_acquire_saved(ctx);
1402 ret = lscsa->decr_status.slot[0]; 1476 ret = __spufs_decr_status_get(data);
1403 spu_release(ctx); 1477 spu_release(ctx);
1404 return ret; 1478 return ret;
1405} 1479}
1406DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, 1480DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1407 spufs_decr_status_set, "%llx\n") 1481 spufs_decr_status_set, "0x%llx\n")
1408 1482
1409static void spufs_spu_tag_mask_set(void *data, u64 val) 1483static void spufs_event_mask_set(void *data, u64 val)
1410{ 1484{
1411 struct spu_context *ctx = data; 1485 struct spu_context *ctx = data;
1412 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1486 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1413 spu_acquire_saved(ctx); 1487 spu_acquire_saved(ctx);
1414 lscsa->tag_mask.slot[0] = (u32) val; 1488 lscsa->event_mask.slot[0] = (u32) val;
1415 spu_release(ctx); 1489 spu_release(ctx);
1416} 1490}
1417 1491
1418static u64 spufs_spu_tag_mask_get(void *data) 1492static u64 __spufs_event_mask_get(void *data)
1419{ 1493{
1420 struct spu_context *ctx = data; 1494 struct spu_context *ctx = data;
1421 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1495 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1496 return lscsa->event_mask.slot[0];
1497}
1498
1499static u64 spufs_event_mask_get(void *data)
1500{
1501 struct spu_context *ctx = data;
1422 u64 ret; 1502 u64 ret;
1423 spu_acquire_saved(ctx); 1503 spu_acquire_saved(ctx);
1424 ret = lscsa->tag_mask.slot[0]; 1504 ret = __spufs_event_mask_get(data);
1425 spu_release(ctx); 1505 spu_release(ctx);
1426 return ret; 1506 return ret;
1427} 1507}
1428DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get, 1508DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1429 spufs_spu_tag_mask_set, "%llx\n") 1509 spufs_event_mask_set, "0x%llx\n")
1430 1510
1431static void spufs_event_mask_set(void *data, u64 val) 1511static u64 __spufs_event_status_get(void *data)
1432{ 1512{
1433 struct spu_context *ctx = data; 1513 struct spu_context *ctx = data;
1434 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1514 struct spu_state *state = &ctx->csa;
1435 spu_acquire_saved(ctx); 1515 u64 stat;
1436 lscsa->event_mask.slot[0] = (u32) val; 1516 stat = state->spu_chnlcnt_RW[0];
1437 spu_release(ctx); 1517 if (stat)
1518 return state->spu_chnldata_RW[0];
1519 return 0;
1438} 1520}
1439 1521
1440static u64 spufs_event_mask_get(void *data) 1522static u64 spufs_event_status_get(void *data)
1441{ 1523{
1442 struct spu_context *ctx = data; 1524 struct spu_context *ctx = data;
1443 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1525 u64 ret = 0;
1444 u64 ret; 1526
1445 spu_acquire_saved(ctx); 1527 spu_acquire_saved(ctx);
1446 ret = lscsa->event_mask.slot[0]; 1528 ret = __spufs_event_status_get(data);
1447 spu_release(ctx); 1529 spu_release(ctx);
1448 return ret; 1530 return ret;
1449} 1531}
1450DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, 1532DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1451 spufs_event_mask_set, "%llx\n") 1533 NULL, "0x%llx\n")
1452 1534
1453static void spufs_srr0_set(void *data, u64 val) 1535static void spufs_srr0_set(void *data, u64 val)
1454{ 1536{
@@ -1470,7 +1552,7 @@ static u64 spufs_srr0_get(void *data)
1470 return ret; 1552 return ret;
1471} 1553}
1472DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, 1554DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1473 "%llx\n") 1555 "0x%llx\n")
1474 1556
1475static u64 spufs_id_get(void *data) 1557static u64 spufs_id_get(void *data)
1476{ 1558{
@@ -1488,12 +1570,18 @@ static u64 spufs_id_get(void *data)
1488} 1570}
1489DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n") 1571DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1490 1572
1491static u64 spufs_object_id_get(void *data) 1573static u64 __spufs_object_id_get(void *data)
1492{ 1574{
1493 struct spu_context *ctx = data; 1575 struct spu_context *ctx = data;
1494 return ctx->object_id; 1576 return ctx->object_id;
1495} 1577}
1496 1578
1579static u64 spufs_object_id_get(void *data)
1580{
1581 /* FIXME: Should there really be no locking here? */
1582 return __spufs_object_id_get(data);
1583}
1584
1497static void spufs_object_id_set(void *data, u64 id) 1585static void spufs_object_id_set(void *data, u64 id)
1498{ 1586{
1499 struct spu_context *ctx = data; 1587 struct spu_context *ctx = data;
@@ -1503,6 +1591,250 @@ static void spufs_object_id_set(void *data, u64 id)
1503DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, 1591DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1504 spufs_object_id_set, "0x%llx\n"); 1592 spufs_object_id_set, "0x%llx\n");
1505 1593
1594static u64 __spufs_lslr_get(void *data)
1595{
1596 struct spu_context *ctx = data;
1597 return ctx->csa.priv2.spu_lslr_RW;
1598}
1599
1600static u64 spufs_lslr_get(void *data)
1601{
1602 struct spu_context *ctx = data;
1603 u64 ret;
1604
1605 spu_acquire_saved(ctx);
1606 ret = __spufs_lslr_get(data);
1607 spu_release(ctx);
1608
1609 return ret;
1610}
1611DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1612
1613static int spufs_info_open(struct inode *inode, struct file *file)
1614{
1615 struct spufs_inode_info *i = SPUFS_I(inode);
1616 struct spu_context *ctx = i->i_ctx;
1617 file->private_data = ctx;
1618 return 0;
1619}
1620
1621static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1622 char __user *buf, size_t len, loff_t *pos)
1623{
1624 u32 mbox_stat;
1625	u32 data = 0;	/* returned as zero rather than stack garbage if no mail is pending */
1626
1627 mbox_stat = ctx->csa.prob.mb_stat_R;
1628 if (mbox_stat & 0x0000ff) {
1629 data = ctx->csa.prob.pu_mb_R;
1630 }
1631
1632 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1633}
1634
1635static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1636 size_t len, loff_t *pos)
1637{
1638 int ret;
1639 struct spu_context *ctx = file->private_data;
1640
1641 if (!access_ok(VERIFY_WRITE, buf, len))
1642 return -EFAULT;
1643
1644 spu_acquire_saved(ctx);
1645 spin_lock(&ctx->csa.register_lock);
1646 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1647 spin_unlock(&ctx->csa.register_lock);
1648 spu_release(ctx);
1649
1650 return ret;
1651}
1652
1653static struct file_operations spufs_mbox_info_fops = {
1654 .open = spufs_info_open,
1655 .read = spufs_mbox_info_read,
1656 .llseek = generic_file_llseek,
1657};
1658
1659static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1660 char __user *buf, size_t len, loff_t *pos)
1661{
1662 u32 ibox_stat;
1663	u32 data = 0;	/* returned as zero rather than stack garbage if the ibox is empty */
1664
1665 ibox_stat = ctx->csa.prob.mb_stat_R;
1666 if (ibox_stat & 0xff0000) {
1667 data = ctx->csa.priv2.puint_mb_R;
1668 }
1669
1670 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1671}
1672
1673static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1674 size_t len, loff_t *pos)
1675{
1676 struct spu_context *ctx = file->private_data;
1677 int ret;
1678
1679 if (!access_ok(VERIFY_WRITE, buf, len))
1680 return -EFAULT;
1681
1682 spu_acquire_saved(ctx);
1683 spin_lock(&ctx->csa.register_lock);
1684 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1685 spin_unlock(&ctx->csa.register_lock);
1686 spu_release(ctx);
1687
1688 return ret;
1689}
1690
1691static struct file_operations spufs_ibox_info_fops = {
1692 .open = spufs_info_open,
1693 .read = spufs_ibox_info_read,
1694 .llseek = generic_file_llseek,
1695};
1696
1697static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1698 char __user *buf, size_t len, loff_t *pos)
1699{
1700 int i, cnt;
1701 u32 data[4];
1702 u32 wbox_stat;
1703
1704 wbox_stat = ctx->csa.prob.mb_stat_R;
1705 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1706 for (i = 0; i < cnt; i++) {
1707 data[i] = ctx->csa.spu_mailbox_data[i];
1708 }
1709
1710 return simple_read_from_buffer(buf, len, pos, &data,
1711 cnt * sizeof(u32));
1712}
1713
1714static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1715 size_t len, loff_t *pos)
1716{
1717 struct spu_context *ctx = file->private_data;
1718 int ret;
1719
1720 if (!access_ok(VERIFY_WRITE, buf, len))
1721 return -EFAULT;
1722
1723 spu_acquire_saved(ctx);
1724 spin_lock(&ctx->csa.register_lock);
1725 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1726 spin_unlock(&ctx->csa.register_lock);
1727 spu_release(ctx);
1728
1729 return ret;
1730}
1731
1732static struct file_operations spufs_wbox_info_fops = {
1733 .open = spufs_info_open,
1734 .read = spufs_wbox_info_read,
1735 .llseek = generic_file_llseek,
1736};
1737
1738static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1739 char __user *buf, size_t len, loff_t *pos)
1740{
1741 struct spu_dma_info info;
1742 struct mfc_cq_sr *qp, *spuqp;
1743 int i;
1744
1745 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1746 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1747 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1748 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1749 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1750 for (i = 0; i < 16; i++) {
1751 qp = &info.dma_info_command_data[i];
1752 spuqp = &ctx->csa.priv2.spuq[i];
1753
1754 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1755 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1756 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1757 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1758 }
1759
1760 return simple_read_from_buffer(buf, len, pos, &info,
1761 sizeof info);
1762}
1763
1764static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1765 size_t len, loff_t *pos)
1766{
1767 struct spu_context *ctx = file->private_data;
1768 int ret;
1769
1770 if (!access_ok(VERIFY_WRITE, buf, len))
1771 return -EFAULT;
1772
1773 spu_acquire_saved(ctx);
1774 spin_lock(&ctx->csa.register_lock);
1775 ret = __spufs_dma_info_read(ctx, buf, len, pos);
1776 spin_unlock(&ctx->csa.register_lock);
1777 spu_release(ctx);
1778
1779 return ret;
1780}
1781
1782static struct file_operations spufs_dma_info_fops = {
1783 .open = spufs_info_open,
1784 .read = spufs_dma_info_read,
1785};
1786
1787static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1788 char __user *buf, size_t len, loff_t *pos)
1789{
1790 struct spu_proxydma_info info;
1791 struct mfc_cq_sr *qp, *puqp;
1792 int ret = sizeof info;
1793 int i;
1794
1795 if (len < ret)
1796 return -EINVAL;
1797
1798 if (!access_ok(VERIFY_WRITE, buf, len))
1799 return -EFAULT;
1800
1801 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1802 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1803 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1804 for (i = 0; i < 8; i++) {
1805 qp = &info.proxydma_info_command_data[i];
1806 puqp = &ctx->csa.priv2.puq[i];
1807
1808 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1809 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1810 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1811 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1812 }
1813
1814 return simple_read_from_buffer(buf, len, pos, &info,
1815 sizeof info);
1816}
1817
1818static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
1819 size_t len, loff_t *pos)
1820{
1821 struct spu_context *ctx = file->private_data;
1822 int ret;
1823
1824 spu_acquire_saved(ctx);
1825 spin_lock(&ctx->csa.register_lock);
1826 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
1827 spin_unlock(&ctx->csa.register_lock);
1828 spu_release(ctx);
1829
1830 return ret;
1831}
1832
1833static struct file_operations spufs_proxydma_info_fops = {
1834 .open = spufs_info_open,
1835 .read = spufs_proxydma_info_read,
1836};
1837
1506struct tree_descr spufs_dir_contents[] = { 1838struct tree_descr spufs_dir_contents[] = {
1507 { "mem", &spufs_mem_fops, 0666, }, 1839 { "mem", &spufs_mem_fops, 0666, },
1508 { "regs", &spufs_regs_fops, 0666, }, 1840 { "regs", &spufs_regs_fops, 0666, },
@@ -1516,18 +1848,70 @@ struct tree_descr spufs_dir_contents[] = {
1516 { "signal2", &spufs_signal2_fops, 0666, }, 1848 { "signal2", &spufs_signal2_fops, 0666, },
1517 { "signal1_type", &spufs_signal1_type, 0666, }, 1849 { "signal1_type", &spufs_signal1_type, 0666, },
1518 { "signal2_type", &spufs_signal2_type, 0666, }, 1850 { "signal2_type", &spufs_signal2_type, 0666, },
1519 { "mss", &spufs_mss_fops, 0666, },
1520 { "mfc", &spufs_mfc_fops, 0666, },
1521 { "cntl", &spufs_cntl_fops, 0666, }, 1851 { "cntl", &spufs_cntl_fops, 0666, },
1522 { "npc", &spufs_npc_ops, 0666, },
1523 { "fpcr", &spufs_fpcr_fops, 0666, }, 1852 { "fpcr", &spufs_fpcr_fops, 0666, },
1853 { "lslr", &spufs_lslr_ops, 0444, },
1854 { "mfc", &spufs_mfc_fops, 0666, },
1855 { "mss", &spufs_mss_fops, 0666, },
1856 { "npc", &spufs_npc_ops, 0666, },
1857 { "srr0", &spufs_srr0_ops, 0666, },
1524 { "decr", &spufs_decr_ops, 0666, }, 1858 { "decr", &spufs_decr_ops, 0666, },
1525 { "decr_status", &spufs_decr_status_ops, 0666, }, 1859 { "decr_status", &spufs_decr_status_ops, 0666, },
1526 { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
1527 { "event_mask", &spufs_event_mask_ops, 0666, }, 1860 { "event_mask", &spufs_event_mask_ops, 0666, },
1528 { "srr0", &spufs_srr0_ops, 0666, }, 1861 { "event_status", &spufs_event_status_ops, 0444, },
1862 { "psmap", &spufs_psmap_fops, 0666, },
1863 { "phys-id", &spufs_id_ops, 0666, },
1864 { "object-id", &spufs_object_id_ops, 0666, },
1865 { "mbox_info", &spufs_mbox_info_fops, 0444, },
1866 { "ibox_info", &spufs_ibox_info_fops, 0444, },
1867 { "wbox_info", &spufs_wbox_info_fops, 0444, },
1868 { "dma_info", &spufs_dma_info_fops, 0444, },
1869 { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
1870 {},
1871};
1872
1873struct tree_descr spufs_dir_nosched_contents[] = {
1874 { "mem", &spufs_mem_fops, 0666, },
1875 { "mbox", &spufs_mbox_fops, 0444, },
1876 { "ibox", &spufs_ibox_fops, 0444, },
1877 { "wbox", &spufs_wbox_fops, 0222, },
1878 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
1879 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
1880 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
1881 { "signal1", &spufs_signal1_fops, 0666, },
1882 { "signal2", &spufs_signal2_fops, 0666, },
1883 { "signal1_type", &spufs_signal1_type, 0666, },
1884 { "signal2_type", &spufs_signal2_type, 0666, },
1885 { "mss", &spufs_mss_fops, 0666, },
1886 { "mfc", &spufs_mfc_fops, 0666, },
1887 { "cntl", &spufs_cntl_fops, 0666, },
1888 { "npc", &spufs_npc_ops, 0666, },
1529 { "psmap", &spufs_psmap_fops, 0666, }, 1889 { "psmap", &spufs_psmap_fops, 0666, },
1530 { "phys-id", &spufs_id_ops, 0666, }, 1890 { "phys-id", &spufs_id_ops, 0666, },
1531 { "object-id", &spufs_object_id_ops, 0666, }, 1891 { "object-id", &spufs_object_id_ops, 0666, },
1532 {}, 1892 {},
1533}; 1893};
1894
1895struct spufs_coredump_reader spufs_coredump_read[] = {
1896 { "regs", __spufs_regs_read, NULL, 128 * 16 },
1897 { "fpcr", __spufs_fpcr_read, NULL, 16 },
1898 { "lslr", NULL, __spufs_lslr_get, 11 },
1899 { "decr", NULL, __spufs_decr_get, 11 },
1900 { "decr_status", NULL, __spufs_decr_status_get, 11 },
1901 { "mem", __spufs_mem_read, NULL, 256 * 1024, },
1902 { "signal1", __spufs_signal1_read, NULL, 4 },
1903 { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
1904 { "signal2", __spufs_signal2_read, NULL, 4 },
1905 { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
1906 { "event_mask", NULL, __spufs_event_mask_get, 8 },
1907 { "event_status", NULL, __spufs_event_status_get, 8 },
1908 { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
1909 { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
1910 { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
1911 { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
1912 { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
1913 { "object-id", NULL, __spufs_object_id_get, 19 },
1914 { },
1915};
1916int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
1917
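Note: the entry sizes in spufs_coredump_read[] can be cross-checked against the info structures dumped earlier: spu_dma_info is five u64 fields plus a 16-entry MFC queue of four u64s each (5 + 16*4 = 69 u64s, hence 69 * 8), and spu_proxydma_info is 3 + 8*4 = 35 u64s (hence 35 * 8). A compilable sanity check of that arithmetic, with the layouts mirrored under invented names:

#include <assert.h>
#include <stdint.h>

struct mfc_cq_sr_user {			/* mirrors struct mfc_cq_sr */
	uint64_t data0, data1, data2, data3;
};

struct spu_dma_info_user {		/* mirrors struct spu_dma_info */
	uint64_t type, mask, status, stall_and_notify, atomic_cmd_status;
	struct mfc_cq_sr_user command_data[16];
};

struct spu_proxydma_info_user {		/* mirrors struct spu_proxydma_info */
	uint64_t type, mask, status;
	struct mfc_cq_sr_user command_data[8];
};

int main(void)
{
	assert(sizeof(struct spu_dma_info_user) == 69 * 8);
	assert(sizeof(struct spu_proxydma_info_user) == 35 * 8);
	return 0;
}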
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index d805ffed892d..ae42e03b8c86 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -135,21 +135,11 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
135 return ret; 135 return ret;
136} 136}
137 137
138static u32 spu_hw_signal1_read(struct spu_context *ctx)
139{
140 return in_be32(&ctx->spu->problem->signal_notify1);
141}
142
143static void spu_hw_signal1_write(struct spu_context *ctx, u32 data) 138static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
144{ 139{
145 out_be32(&ctx->spu->problem->signal_notify1, data); 140 out_be32(&ctx->spu->problem->signal_notify1, data);
146} 141}
147 142
148static u32 spu_hw_signal2_read(struct spu_context *ctx)
149{
150 return in_be32(&ctx->spu->problem->signal_notify2);
151}
152
153static void spu_hw_signal2_write(struct spu_context *ctx, u32 data) 143static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
154{ 144{
155 out_be32(&ctx->spu->problem->signal_notify2, data); 145 out_be32(&ctx->spu->problem->signal_notify2, data);
@@ -217,21 +207,42 @@ static char *spu_hw_get_ls(struct spu_context *ctx)
217 return ctx->spu->local_store; 207 return ctx->spu->local_store;
218} 208}
219 209
220static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
221{
222	eieio();
223	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
224}
225
226static void spu_hw_runcntl_stop(struct spu_context *ctx)
227{
228	spin_lock_irq(&ctx->spu->register_lock);
229	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
230	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
231		cpu_relax();
232	spin_unlock_irq(&ctx->spu->register_lock);
233}
234
210static u32 spu_hw_runcntl_read(struct spu_context *ctx)
211{
212	return in_be32(&ctx->spu->problem->spu_runcntl_RW);
213}
214
215static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
216{
217	spin_lock_irq(&ctx->spu->register_lock);
218	if (val & SPU_RUNCNTL_ISOLATE)
219		out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
220	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
221	spin_unlock_irq(&ctx->spu->register_lock);
222}
223
224static void spu_hw_master_start(struct spu_context *ctx)
225{
226 struct spu *spu = ctx->spu;
227 u64 sr1;
228
229 spin_lock_irq(&spu->register_lock);
230 sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
231 spu_mfc_sr1_set(spu, sr1);
232 spin_unlock_irq(&spu->register_lock);
233}
234
235static void spu_hw_master_stop(struct spu_context *ctx)
236{
237 struct spu *spu = ctx->spu;
238 u64 sr1;
239
240 spin_lock_irq(&spu->register_lock);
241 sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
242 spu_mfc_sr1_set(spu, sr1);
243 spin_unlock_irq(&spu->register_lock);
244}
245
235static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode) 246static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
236{ 247{
237 struct spu_problem __iomem *prob = ctx->spu->problem; 248 struct spu_problem __iomem *prob = ctx->spu->problem;
@@ -291,9 +302,7 @@ struct spu_context_ops spu_hw_ops = {
291 .mbox_stat_poll = spu_hw_mbox_stat_poll, 302 .mbox_stat_poll = spu_hw_mbox_stat_poll,
292 .ibox_read = spu_hw_ibox_read, 303 .ibox_read = spu_hw_ibox_read,
293 .wbox_write = spu_hw_wbox_write, 304 .wbox_write = spu_hw_wbox_write,
294 .signal1_read = spu_hw_signal1_read,
295 .signal1_write = spu_hw_signal1_write, 305 .signal1_write = spu_hw_signal1_write,
296 .signal2_read = spu_hw_signal2_read,
297 .signal2_write = spu_hw_signal2_write, 306 .signal2_write = spu_hw_signal2_write,
298 .signal1_type_set = spu_hw_signal1_type_set, 307 .signal1_type_set = spu_hw_signal1_type_set,
299 .signal1_type_get = spu_hw_signal1_type_get, 308 .signal1_type_get = spu_hw_signal1_type_get,
@@ -303,8 +312,10 @@ struct spu_context_ops spu_hw_ops = {
303 .npc_write = spu_hw_npc_write, 312 .npc_write = spu_hw_npc_write,
304 .status_read = spu_hw_status_read, 313 .status_read = spu_hw_status_read,
305 .get_ls = spu_hw_get_ls, 314 .get_ls = spu_hw_get_ls,
315 .runcntl_read = spu_hw_runcntl_read,
306 .runcntl_write = spu_hw_runcntl_write, 316 .runcntl_write = spu_hw_runcntl_write,
307	.runcntl_stop = spu_hw_runcntl_stop,
317	.master_start = spu_hw_master_start,
318 .master_stop = spu_hw_master_stop,
308 .set_mfc_query = spu_hw_set_mfc_query, 319 .set_mfc_query = spu_hw_set_mfc_query,
309 .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus, 320 .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
310 .get_mfc_free_elements = spu_hw_get_mfc_free_elements, 321 .get_mfc_free_elements = spu_hw_get_mfc_free_elements,
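Note: master_start/master_stop above replace the old runcntl_stop busy-wait with a locked read-modify-write of the master run-control bit in SR1. A sketch of that pattern, assuming a placeholder mask bit and a pthread mutex in place of spin_lock_irq():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MASTER_RUN_CONTROL_MASK (1ULL << 8)	/* placeholder bit */

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t sr1;			/* stand-in for the SR1 register */

static void master_set_running(int run)
{
	pthread_mutex_lock(&register_lock);
	if (run)
		sr1 |= MASTER_RUN_CONTROL_MASK;	 /* master_start */
	else
		sr1 &= ~MASTER_RUN_CONTROL_MASK; /* master_stop */
	pthread_mutex_unlock(&register_lock);
}

int main(void)
{
	master_set_running(1);
	master_set_running(0);
	printf("sr1 = %#llx\n", (unsigned long long)sr1);
	return 0;
}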
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 427d00a4f6a0..738b9244382f 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -33,21 +33,22 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/parser.h> 34#include <linux/parser.h>
35 35
36#include <asm/io.h>
36#include <asm/prom.h>
37#include <asm/semaphore.h> 37#include <asm/semaphore.h>
38#include <asm/spu.h> 38#include <asm/spu.h>
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40 40
41#include "spufs.h" 41#include "spufs.h"
42 42
43static kmem_cache_t *spufs_inode_cache;
43static struct kmem_cache *spufs_inode_cache;
44char *isolated_loader;
44 45
45static struct inode * 46static struct inode *
46spufs_alloc_inode(struct super_block *sb) 47spufs_alloc_inode(struct super_block *sb)
47{ 48{
48 struct spufs_inode_info *ei; 49 struct spufs_inode_info *ei;
49 50
50	ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
51	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
51 if (!ei) 52 if (!ei)
52 return NULL; 53 return NULL;
53 54
@@ -64,7 +65,7 @@ spufs_destroy_inode(struct inode *inode)
64} 65}
65 66
66static void 67static void
67spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
68spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
68{ 69{
69 struct spufs_inode_info *ei = p; 70 struct spufs_inode_info *ei = p;
70 71
@@ -204,7 +205,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
204 struct dentry *dir; 205 struct dentry *dir;
205 int ret; 206 int ret;
206 207
207	dir = file->f_dentry;
208	dir = file->f_path.dentry;
208 parent = dir->d_parent->d_inode; 209 parent = dir->d_parent->d_inode;
209 ctx = SPUFS_I(dir->d_inode)->i_ctx; 210 ctx = SPUFS_I(dir->d_inode)->i_ctx;
210 211
@@ -231,6 +232,7 @@ struct file_operations spufs_context_fops = {
231 .readdir = dcache_readdir, 232 .readdir = dcache_readdir,
232 .fsync = simple_sync_file, 233 .fsync = simple_sync_file,
233}; 234};
235EXPORT_SYMBOL_GPL(spufs_context_fops);
234 236
235static int 237static int
236spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, 238spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
@@ -255,10 +257,14 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
255 goto out_iput; 257 goto out_iput;
256 258
257 ctx->flags = flags; 259 ctx->flags = flags;
258
259 inode->i_op = &spufs_dir_inode_operations; 260 inode->i_op = &spufs_dir_inode_operations;
260 inode->i_fop = &simple_dir_operations; 261 inode->i_fop = &simple_dir_operations;
261	ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
262	if (flags & SPU_CREATE_NOSCHED)
263 ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
264 mode, ctx);
265 else
266 ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
267
262 if (ret) 268 if (ret)
263 goto out_free_ctx; 269 goto out_free_ctx;
264 270
@@ -307,6 +313,20 @@ static int spufs_create_context(struct inode *inode,
307{ 313{
308 int ret; 314 int ret;
309 315
316 ret = -EPERM;
317 if ((flags & SPU_CREATE_NOSCHED) &&
318 !capable(CAP_SYS_NICE))
319 goto out_unlock;
320
321 ret = -EINVAL;
322 if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
323 == SPU_CREATE_ISOLATE)
324 goto out_unlock;
325
326 ret = -ENODEV;
327 if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
328 goto out_unlock;
329
310 ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO); 330 ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
311 if (ret) 331 if (ret)
312 goto out_unlock; 332 goto out_unlock;
@@ -343,7 +363,7 @@ static int spufs_gang_close(struct inode *inode, struct file *file)
343 struct dentry *dir; 363 struct dentry *dir;
344 int ret; 364 int ret;
345 365
346	dir = file->f_dentry;
366	dir = file->f_path.dentry;
347 parent = dir->d_parent->d_inode; 367 parent = dir->d_parent->d_inode;
348 368
349 ret = spufs_rmgang(parent, dir); 369 ret = spufs_rmgang(parent, dir);
@@ -540,6 +560,30 @@ spufs_parse_options(char *options, struct inode *root)
540 return 1; 560 return 1;
541} 561}
542 562
563static void
564spufs_init_isolated_loader(void)
565{
566 struct device_node *dn;
567 const char *loader;
568 int size;
569
570 dn = of_find_node_by_path("/spu-isolation");
571 if (!dn)
572 return;
573
574 loader = get_property(dn, "loader", &size);
575 if (!loader)
576 return;
577
578	/* kmalloc should align on a 16-byte boundary. */
579 isolated_loader = kmalloc(size, GFP_KERNEL);
580 if (!isolated_loader)
581 return;
582
583 memcpy(isolated_loader, loader, size);
584 printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
585}
586
543static int 587static int
544spufs_create_root(struct super_block *sb, void *data) 588spufs_create_root(struct super_block *sb, void *data)
545{ 589{
@@ -608,6 +652,7 @@ static struct file_system_type spufs_type = {
608static int __init spufs_init(void) 652static int __init spufs_init(void)
609{ 653{
610 int ret; 654 int ret;
655
611 ret = -ENOMEM; 656 ret = -ENOMEM;
612 spufs_inode_cache = kmem_cache_create("spufs_inode_cache", 657 spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
613 sizeof(struct spufs_inode_info), 0, 658 sizeof(struct spufs_inode_info), 0,
@@ -625,6 +670,12 @@ static int __init spufs_init(void)
625 ret = register_spu_syscalls(&spufs_calls); 670 ret = register_spu_syscalls(&spufs_calls);
626 if (ret) 671 if (ret)
627 goto out_fs; 672 goto out_fs;
673 ret = register_arch_coredump_calls(&spufs_coredump_calls);
674 if (ret)
675 goto out_fs;
676
677 spufs_init_isolated_loader();
678
628 return 0; 679 return 0;
629out_fs: 680out_fs:
630 unregister_filesystem(&spufs_type); 681 unregister_filesystem(&spufs_type);
@@ -638,6 +689,7 @@ module_init(spufs_init);
638static void __exit spufs_exit(void) 689static void __exit spufs_exit(void)
639{ 690{
640 spu_sched_exit(); 691 spu_sched_exit();
692 unregister_arch_coredump_calls(&spufs_coredump_calls);
641 unregister_spu_syscalls(&spufs_calls); 693 unregister_spu_syscalls(&spufs_calls);
642 unregister_filesystem(&spufs_type); 694 unregister_filesystem(&spufs_type);
643 kmem_cache_destroy(spufs_inode_cache); 695 kmem_cache_destroy(spufs_inode_cache);
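Note: the three checks added to spufs_create_context() form a simple ladder — SPU_CREATE_NOSCHED requires CAP_SYS_NICE, SPU_CREATE_ISOLATE is only valid together with NOSCHED, and ISOLATE further requires that spufs_init_isolated_loader() found a loader blob. A compilable restatement of the ladder (flag values are placeholders, not necessarily the kernel's):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define SPU_CREATE_NOSCHED 0x0004	/* placeholder values */
#define SPU_CREATE_ISOLATE 0x0008

static int check_create_flags(int flags, bool privileged, bool have_loader)
{
	if ((flags & SPU_CREATE_NOSCHED) && !privileged)
		return -EPERM;		/* NOSCHED needs CAP_SYS_NICE */
	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		return -EINVAL;		/* ISOLATE without NOSCHED */
	if ((flags & SPU_CREATE_ISOLATE) && !have_loader)
		return -ENODEV;		/* no isolated loader found */
	return 0;
}

int main(void)
{
	printf("%d\n", check_create_flags(SPU_CREATE_ISOLATE, true, true));
	return 0;
}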
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 63df8cf4ba16..1acc2ffef8c8 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -1,7 +1,11 @@
1#define DEBUG
2
1#include <linux/wait.h> 3#include <linux/wait.h>
2#include <linux/ptrace.h> 4#include <linux/ptrace.h>
3 5
4#include <asm/spu.h> 6#include <asm/spu.h>
7#include <asm/spu_priv1.h>
8#include <asm/io.h>
5#include <asm/unistd.h> 9#include <asm/unistd.h>
6 10
7#include "spufs.h" 11#include "spufs.h"
@@ -24,6 +28,7 @@ void spufs_dma_callback(struct spu *spu, int type)
24 } else { 28 } else {
25 switch (type) { 29 switch (type) {
26 case SPE_EVENT_DMA_ALIGNMENT: 30 case SPE_EVENT_DMA_ALIGNMENT:
31 case SPE_EVENT_SPE_DATA_STORAGE:
27 case SPE_EVENT_INVALID_DMA: 32 case SPE_EVENT_INVALID_DMA:
28 force_sig(SIGBUS, /* info, */ current); 33 force_sig(SIGBUS, /* info, */ current);
29 break; 34 break;
@@ -48,15 +53,122 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
48 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0; 53 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
49} 54}
50 55
56static int spu_setup_isolated(struct spu_context *ctx)
57{
58 int ret;
59 u64 __iomem *mfc_cntl;
60 u64 sr1;
61 u32 status;
62 unsigned long timeout;
63 const u32 status_loading = SPU_STATUS_RUNNING
64 | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
65
66 if (!isolated_loader)
67 return -ENODEV;
68
69 ret = spu_acquire_exclusive(ctx);
70 if (ret)
71 goto out;
72
73 mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
74
75 /* purge the MFC DMA queue to ensure no spurious accesses before we
76 * enter kernel mode */
77 timeout = jiffies + HZ;
78 out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
79 while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
80 != MFC_CNTL_PURGE_DMA_COMPLETE) {
81 if (time_after(jiffies, timeout)) {
82 printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
83 __FUNCTION__);
84 ret = -EIO;
85 goto out_unlock;
86 }
87 cond_resched();
88 }
89
90 /* put the SPE in kernel mode to allow access to the loader */
91 sr1 = spu_mfc_sr1_get(ctx->spu);
92 sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
93 spu_mfc_sr1_set(ctx->spu, sr1);
94
95 /* start the loader */
96 ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
97 ctx->ops->signal2_write(ctx,
98 (unsigned long)isolated_loader & 0xffffffff);
99
100 ctx->ops->runcntl_write(ctx,
101 SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
102
103 ret = 0;
104 timeout = jiffies + HZ;
105 while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
106 status_loading) {
107 if (time_after(jiffies, timeout)) {
108 printk(KERN_ERR "%s: timeout waiting for loader\n",
109 __FUNCTION__);
110 ret = -EIO;
111 goto out_drop_priv;
112 }
113 cond_resched();
114 }
115
116 if (!(status & SPU_STATUS_RUNNING)) {
117 /* If isolated LOAD has failed: run SPU, we will get a stop-and
118 * signal later. */
119 pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
120 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
121 ret = -EACCES;
122
123 } else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
124 /* This isn't allowed by the CBEA, but check anyway */
125 pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
126 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
127 ret = -EINVAL;
128 }
129
130out_drop_priv:
131 /* Finished accessing the loader. Drop kernel mode */
132 sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
133 spu_mfc_sr1_set(ctx->spu, sr1);
134
135out_unlock:
136 spu_release_exclusive(ctx);
137out:
138 return ret;
139}
140
51static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
52{
53	int ret;
54
55	if ((ret = spu_acquire_runnable(ctx)) != 0)
56		return ret;
57	ctx->ops->npc_write(ctx, *npc);
58	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
59	return 0;
60}
141static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
142{
143	int ret;
144	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
145
146	ret = spu_acquire_runnable(ctx);
147	if (ret)
148		return ret;
149
150	if (ctx->flags & SPU_CREATE_ISOLATE) {
151		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
152			/* Need to release ctx, because spu_setup_isolated will
153			 * acquire it exclusively.
154			 */
155			spu_release(ctx);
156			ret = spu_setup_isolated(ctx);
157			if (!ret)
158				ret = spu_acquire_runnable(ctx);
159		}
160
161		/* if userspace has set the runcntl register (eg, to issue an
162		 * isolated exit), we need to re-set it here */
163		runcntl = ctx->ops->runcntl_read(ctx) &
164			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
165		if (runcntl == 0)
166			runcntl = SPU_RUNCNTL_RUNNABLE;
167	} else
168		ctx->ops->npc_write(ctx, *npc);
169
170	ctx->ops->runcntl_write(ctx, runcntl);
171	return ret;
172}
61 173
62static inline int spu_run_fini(struct spu_context *ctx, u32 * npc, 174static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
@@ -70,13 +182,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
70 182
71 if (signal_pending(current)) 183 if (signal_pending(current))
72 ret = -ERESTARTSYS; 184 ret = -ERESTARTSYS;
73	if (unlikely(current->ptrace & PT_PTRACED)) {
185
74 if ((*status & SPU_STATUS_STOPPED_BY_STOP)
75 && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
76 force_sig(SIGTRAP, current);
77 ret = -ERESTARTSYS;
78 }
79 }
80 return ret; 186 return ret;
81} 187}
82 188
@@ -204,6 +310,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
204 if (down_interruptible(&ctx->run_sema)) 310 if (down_interruptible(&ctx->run_sema))
205 return -ERESTARTSYS; 311 return -ERESTARTSYS;
206 312
313 ctx->ops->master_start(ctx);
207 ctx->event_return = 0; 314 ctx->event_return = 0;
208 ret = spu_run_init(ctx, npc); 315 ret = spu_run_init(ctx, npc);
209 if (ret) 316 if (ret)
@@ -223,7 +330,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
223 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { 330 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
224 ret = spu_reacquire_runnable(ctx, npc, &status); 331 ret = spu_reacquire_runnable(ctx, npc, &status);
225 if (ret) 332 if (ret)
226			goto out;
333			goto out2;
227 continue; 334 continue;
228 } 335 }
229 ret = spu_process_events(ctx); 336 ret = spu_process_events(ctx);
@@ -231,12 +338,24 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
231 } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP | 338 } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
232 SPU_STATUS_STOPPED_BY_HALT))); 339 SPU_STATUS_STOPPED_BY_HALT)));
233 340
234	ctx->ops->runcntl_stop(ctx);
341	ctx->ops->master_stop(ctx);
235 ret = spu_run_fini(ctx, npc, &status); 342 ret = spu_run_fini(ctx, npc, &status);
236 if (!ret)
237 ret = status;
238 spu_yield(ctx); 343 spu_yield(ctx);
239 344
345out2:
346 if ((ret == 0) ||
347 ((ret == -ERESTARTSYS) &&
348 ((status & SPU_STATUS_STOPPED_BY_HALT) ||
349 ((status & SPU_STATUS_STOPPED_BY_STOP) &&
350 (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
351 ret = status;
352
353 if ((status & SPU_STATUS_STOPPED_BY_STOP)
354 && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
355 force_sig(SIGTRAP, current);
356 ret = -ERESTARTSYS;
357 }
358
240out: 359out:
241 *event = ctx->event_return; 360 *event = ctx->event_return;
242 up(&ctx->run_sema); 361 up(&ctx->run_sema);
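Note: the status handling that moved to the end of spufs_run_spu() decodes the SPU stop code from the upper half of the status word: code 0x3fff forces SIGTRAP, while a 0x2104 stop keeps -ERESTARTSYS rather than being folded into the return value. A compilable sketch of the decode; the shift and flag values below are assumptions for the sketch:

#include <stdint.h>
#include <stdio.h>

#define SPU_STOP_STATUS_SHIFT		16	/* assumption */
#define SPU_STATUS_STOPPED_BY_STOP	0x02	/* assumption */

int main(void)
{
	uint32_t status = (0x3fff << SPU_STOP_STATUS_SHIFT)
			| SPU_STATUS_STOPPED_BY_STOP;

	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)
		printf("breakpoint stop -> force SIGTRAP\n");
	return 0;
}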
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index a0f55ca2d488..70fb13395c04 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -29,6 +29,7 @@
29 29
30#include <asm/spu.h> 30#include <asm/spu.h>
31#include <asm/spu_csa.h> 31#include <asm/spu_csa.h>
32#include <asm/spu_info.h>
32 33
33/* The magic number for our file system */ 34/* The magic number for our file system */
34enum { 35enum {
@@ -114,13 +115,19 @@ struct spu_context_ops {
114 void (*npc_write) (struct spu_context * ctx, u32 data); 115 void (*npc_write) (struct spu_context * ctx, u32 data);
115 u32(*status_read) (struct spu_context * ctx); 116 u32(*status_read) (struct spu_context * ctx);
116 char*(*get_ls) (struct spu_context * ctx); 117 char*(*get_ls) (struct spu_context * ctx);
118 u32 (*runcntl_read) (struct spu_context * ctx);
117 void (*runcntl_write) (struct spu_context * ctx, u32 data); 119 void (*runcntl_write) (struct spu_context * ctx, u32 data);
118	void (*runcntl_stop) (struct spu_context * ctx);
120	void (*master_start) (struct spu_context * ctx);
121 void (*master_stop) (struct spu_context * ctx);
119 int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode); 122 int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
120 u32 (*read_mfc_tagstatus)(struct spu_context * ctx); 123 u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
121 u32 (*get_mfc_free_elements)(struct spu_context *ctx); 124 u32 (*get_mfc_free_elements)(struct spu_context *ctx);
122	int (*send_mfc_command)(struct spu_context *ctx,
123				struct mfc_dma_command *cmd);
125	int (*send_mfc_command)(struct spu_context * ctx,
126				struct mfc_dma_command * cmd);
127 void (*dma_info_read) (struct spu_context * ctx,
128 struct spu_dma_info * info);
129 void (*proxydma_info_read) (struct spu_context * ctx,
130 struct spu_proxydma_info * info);
124}; 131};
125 132
126extern struct spu_context_ops spu_hw_ops; 133extern struct spu_context_ops spu_hw_ops;
@@ -135,6 +142,7 @@ struct spufs_inode_info {
135 container_of(inode, struct spufs_inode_info, vfs_inode) 142 container_of(inode, struct spufs_inode_info, vfs_inode)
136 143
137extern struct tree_descr spufs_dir_contents[]; 144extern struct tree_descr spufs_dir_contents[];
145extern struct tree_descr spufs_dir_nosched_contents[];
138 146
139/* system call implementation */ 147/* system call implementation */
140long spufs_run_spu(struct file *file, 148long spufs_run_spu(struct file *file,
@@ -162,6 +170,12 @@ void spu_acquire(struct spu_context *ctx);
162void spu_release(struct spu_context *ctx); 170void spu_release(struct spu_context *ctx);
163int spu_acquire_runnable(struct spu_context *ctx); 171int spu_acquire_runnable(struct spu_context *ctx);
164void spu_acquire_saved(struct spu_context *ctx); 172void spu_acquire_saved(struct spu_context *ctx);
173int spu_acquire_exclusive(struct spu_context *ctx);
174
175static inline void spu_release_exclusive(struct spu_context *ctx)
176{
177 up_write(&ctx->state_sema);
178}
165 179
166int spu_activate(struct spu_context *ctx, u64 flags); 180int spu_activate(struct spu_context *ctx, u64 flags);
167void spu_deactivate(struct spu_context *ctx); 181void spu_deactivate(struct spu_context *ctx);
@@ -169,6 +183,8 @@ void spu_yield(struct spu_context *ctx);
169int __init spu_sched_init(void); 183int __init spu_sched_init(void);
170void __exit spu_sched_exit(void); 184void __exit spu_sched_exit(void);
171 185
186extern char *isolated_loader;
187
172/* 188/*
173 * spufs_wait 189 * spufs_wait
174 * Same as wait_event_interruptible(), except that here 190 * Same as wait_event_interruptible(), except that here
@@ -207,4 +223,15 @@ void spufs_stop_callback(struct spu *spu);
207void spufs_mfc_callback(struct spu *spu); 223void spufs_mfc_callback(struct spu *spu);
208void spufs_dma_callback(struct spu *spu, int type); 224void spufs_dma_callback(struct spu *spu, int type);
209 225
226extern struct spu_coredump_calls spufs_coredump_calls;
227struct spufs_coredump_reader {
228 char *name;
229 ssize_t (*read)(struct spu_context *ctx,
230 char __user *buffer, size_t size, loff_t *pos);
231 u64 (*get)(void *data);
232 size_t size;
233};
234extern struct spufs_coredump_reader spufs_coredump_read[];
235extern int spufs_coredump_num_notes;
236
210#endif 237#endif
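Note: struct spufs_coredump_reader gives each note either a read() callback (raw bytes from the context) or a get() callback (a single value), and spufs_coredump_read[] ends with an empty entry — which is why spufs_coredump_num_notes subtracts one from ARRAY_SIZE. A compilable analogue of the dispatch over such a table, using invented demo names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reader {
	const char *name;
	size_t (*read)(char *buf, size_t size);	/* raw dump */
	uint64_t (*get)(void);			/* single register */
	size_t size;
};

static uint64_t demo_decr_get(void) { return 0x1234; }

static const struct reader table[] = {
	{ "decr", NULL, demo_decr_get, 11 },
	{ NULL, NULL, NULL, 0 },	/* terminator, as in the kernel table */
};

int main(void)
{
	const struct reader *r;

	for (r = table; r->name; r++) {
		if (r->get)
			printf("%s: %#llx\n", r->name,
			       (unsigned long long)r->get());
		/* entries with r->read would be copied out verbatim */
	}
	return 0;
}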
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 0f782ca662ba..c08981ff7fc6 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -102,7 +102,7 @@ static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
102 * saved at this time. 102 * saved at this time.
103 */ 103 */
104 isolate_state = SPU_STATUS_ISOLATED_STATE | 104 isolate_state = SPU_STATUS_ISOLATED_STATE |
105	    SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
105	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
106 return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0; 106 return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
107} 107}
108 108
@@ -1046,12 +1046,12 @@ static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
1046 */ 1046 */
1047 if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) { 1047 if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
1048 if (in_be32(&prob->spu_status_R) & 1048 if (in_be32(&prob->spu_status_R) &
1049		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
1049		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
1050 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1050 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1051 SPU_STATUS_RUNNING); 1051 SPU_STATUS_RUNNING);
1052 } 1052 }
1053 if ((in_be32(&prob->spu_status_R) & 1053 if ((in_be32(&prob->spu_status_R) &
1054		    SPU_STATUS_ISOLATED_LOAD_STAUTUS)
1054		    SPU_STATUS_ISOLATED_LOAD_STATUS)
1055 || (in_be32(&prob->spu_status_R) & 1055 || (in_be32(&prob->spu_status_R) &
1056 SPU_STATUS_ISOLATED_STATE)) { 1056 SPU_STATUS_ISOLATED_STATE)) {
1057 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 1057 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
@@ -1085,7 +1085,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1085 */ 1085 */
1086 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) { 1086 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
1087 if (in_be32(&prob->spu_status_R) & 1087 if (in_be32(&prob->spu_status_R) &
1088		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
1088		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
1089 spu_mfc_sr1_set(spu, 1089 spu_mfc_sr1_set(spu,
1090 MFC_STATE1_MASTER_RUN_CONTROL_MASK); 1090 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1091 eieio(); 1091 eieio();
@@ -1095,7 +1095,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1095 SPU_STATUS_RUNNING); 1095 SPU_STATUS_RUNNING);
1096 } 1096 }
1097 if ((in_be32(&prob->spu_status_R) & 1097 if ((in_be32(&prob->spu_status_R) &
1098		    SPU_STATUS_ISOLATED_LOAD_STAUTUS)
1098		    SPU_STATUS_ISOLATED_LOAD_STATUS)
1099 || (in_be32(&prob->spu_status_R) & 1099 || (in_be32(&prob->spu_status_R) &
1100 SPU_STATUS_ISOLATED_STATE)) { 1100 SPU_STATUS_ISOLATED_STATE)) {
1101 spu_mfc_sr1_set(spu, 1101 spu_mfc_sr1_set(spu,
@@ -1916,6 +1916,51 @@ static void save_lscsa(struct spu_state *prev, struct spu *spu)
1916 wait_spu_stopped(prev, spu); /* Step 57. */ 1916 wait_spu_stopped(prev, spu); /* Step 57. */
1917} 1917}
1918 1918
1919static void force_spu_isolate_exit(struct spu *spu)
1920{
1921 struct spu_problem __iomem *prob = spu->problem;
1922 struct spu_priv2 __iomem *priv2 = spu->priv2;
1923
1924 /* Stop SPE execution and wait for completion. */
1925 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1926 iobarrier_rw();
1927 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
1928
1929 /* Restart SPE master runcntl. */
1930 spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1931 iobarrier_w();
1932
1933 /* Initiate isolate exit request and wait for completion. */
1934 out_be64(&priv2->spu_privcntl_RW, 4LL);
1935 iobarrier_w();
1936 out_be32(&prob->spu_runcntl_RW, 2);
1937 iobarrier_rw();
1938 POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
1939 & SPU_STATUS_STOPPED_BY_STOP));
1940
1941 /* Reset load request to normal. */
1942 out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
1943 iobarrier_w();
1944}
1945
1946/**
1947 * stop_spu_isolate
1948 * Check SPU run-control state and force isolated
1949 * exit function as necessary.
1950 */
1951static void stop_spu_isolate(struct spu *spu)
1952{
1953 struct spu_problem __iomem *prob = spu->problem;
1954
1955 if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
1956 /* The SPU is in isolated state; the only way
1957 * to get it out is to perform an isolated
1958 * exit (clean) operation.
1959 */
1960 force_spu_isolate_exit(spu);
1961 }
1962}
1963
1919static void harvest(struct spu_state *prev, struct spu *spu) 1964static void harvest(struct spu_state *prev, struct spu *spu)
1920{ 1965{
1921 /* 1966 /*
@@ -1928,6 +1973,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
1928 inhibit_user_access(prev, spu); /* Step 3. */ 1973 inhibit_user_access(prev, spu); /* Step 3. */
1929 terminate_spu_app(prev, spu); /* Step 4. */ 1974 terminate_spu_app(prev, spu); /* Step 4. */
1930 set_switch_pending(prev, spu); /* Step 5. */ 1975 set_switch_pending(prev, spu); /* Step 5. */
1976 stop_spu_isolate(spu); /* NEW. */
1931 remove_other_spu_access(prev, spu); /* Step 6. */ 1977 remove_other_spu_access(prev, spu); /* Step 6. */
1932 suspend_mfc(prev, spu); /* Step 7. */ 1978 suspend_mfc(prev, spu); /* Step 7. */
1933 wait_suspend_mfc_complete(prev, spu); /* Step 8. */ 1979 wait_suspend_mfc_complete(prev, spu); /* Step 8. */
@@ -2096,11 +2142,11 @@ int spu_save(struct spu_state *prev, struct spu *spu)
2096 acquire_spu_lock(spu); /* Step 1. */ 2142 acquire_spu_lock(spu); /* Step 1. */
2097 rc = __do_spu_save(prev, spu); /* Steps 2-53. */ 2143 rc = __do_spu_save(prev, spu); /* Steps 2-53. */
2098 release_spu_lock(spu); 2144 release_spu_lock(spu);
2099	if (rc) {
2145	if (rc != 0 && rc != 2 && rc != 6) {
2100 panic("%s failed on SPU[%d], rc=%d.\n", 2146 panic("%s failed on SPU[%d], rc=%d.\n",
2101 __func__, spu->number, rc); 2147 __func__, spu->number, rc);
2102 } 2148 }
2103	return rc;
2149	return 0;
2104} 2150}
2105EXPORT_SYMBOL_GPL(spu_save); 2151EXPORT_SYMBOL_GPL(spu_save);
2106 2152
@@ -2165,9 +2211,6 @@ static void init_priv1(struct spu_state *csa)
2165 MFC_STATE1_PROBLEM_STATE_MASK | 2211 MFC_STATE1_PROBLEM_STATE_MASK |
2166 MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK; 2212 MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
2167 2213
2168 /* Set storage description. */
2169 csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);
2170
2171 /* Enable OS-specific set of interrupts. */ 2214 /* Enable OS-specific set of interrupts. */
2172 csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR | 2215 csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
2173 CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR | 2216 CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
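Note: both spu_setup_isolated() earlier and the POLL_WHILE_* loops in force_spu_isolate_exit() above kick the hardware and then poll a status register; the former bounds the wait with a jiffies deadline, the latter spin unboundedly. A compilable sketch of the bounded variant, with clock_gettime() and sched_yield() as userspace stand-ins for jiffies and cond_resched():

#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int poll_until(bool (*done)(void), double timeout_sec)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (!done()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) +
		    (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_sec)
			return -1;	/* like the -EIO timeouts above */
		sched_yield();		/* stands in for cond_resched() */
	}
	return 0;
}

static bool always_done(void) { return true; }

int main(void)
{
	printf("%d\n", poll_until(always_done, 1.0));
	return 0;
}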
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index a6d1ae4dc2a3..8e37bdf4dfda 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -46,7 +46,7 @@ static long do_spu_run(struct file *filp,
46 if (filp->f_op != &spufs_context_fops) 46 if (filp->f_op != &spufs_context_fops)
47 goto out; 47 goto out;
48 48
49	i = SPUFS_I(filp->f_dentry->d_inode);
49	i = SPUFS_I(filp->f_path.dentry->d_inode);
50 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status); 50 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
51 51
52 if (put_user(npc, unpc)) 52 if (put_user(npc, unpc))
diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h
index 996c28744e96..63f0aee4c158 100644
--- a/arch/powerpc/platforms/chrp/chrp.h
+++ b/arch/powerpc/platforms/chrp/chrp.h
@@ -9,4 +9,3 @@ extern long chrp_time_init(void);
9 9
10extern void chrp_find_bridges(void); 10extern void chrp_find_bridges(void);
11extern void chrp_event_scan(unsigned long); 11extern void chrp_event_scan(unsigned long);
12extern void chrp_pcibios_fixup(void);
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 0f4340506c75..ddb4a116ea89 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -156,15 +156,6 @@ hydra_init(void)
156 return 1; 156 return 1;
157} 157}
158 158
159void __init
160chrp_pcibios_fixup(void)
161{
162 struct pci_dev *dev = NULL;
163
164 for_each_pci_dev(dev)
165 pci_read_irq_line(dev);
166}
167
168#define PRG_CL_RESET_VALID 0x00010000 159#define PRG_CL_RESET_VALID 0x00010000
169 160
170static void __init 161static void __init
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 49b8dabcbc99..e1f51d455984 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -588,7 +588,6 @@ static int __init chrp_probe(void)
588 ISA_DMA_THRESHOLD = ~0L; 588 ISA_DMA_THRESHOLD = ~0L;
589 DMA_MODE_READ = 0x44; 589 DMA_MODE_READ = 0x44;
590 DMA_MODE_WRITE = 0x48; 590 DMA_MODE_WRITE = 0x48;
591 isa_io_base = CHRP_ISA_IO_BASE; /* default value */
592 591
593 return 1; 592 return 1;
594} 593}
@@ -600,7 +599,6 @@ define_machine(chrp) {
600 .init = chrp_init2, 599 .init = chrp_init2,
601 .show_cpuinfo = chrp_show_cpuinfo, 600 .show_cpuinfo = chrp_show_cpuinfo,
602 .init_IRQ = chrp_init_IRQ, 601 .init_IRQ = chrp_init_IRQ,
603 .pcibios_fixup = chrp_pcibios_fixup,
604 .restart = rtas_restart, 602 .restart = rtas_restart,
605 .power_off = rtas_power_off, 603 .power_off = rtas_power_off,
606 .halt = rtas_halt, 604 .halt = rtas_halt,
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 234a861870a8..b3c2ce4cb7a8 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -35,7 +35,7 @@ config HDPU
35 Select HDPU if configuring a Sky Computers Compute Blade. 35 Select HDPU if configuring a Sky Computers Compute Blade.
36 36
37config HDPU_FEATURES 37config HDPU_FEATURES
38	depends HDPU
38	depends on HDPU
39 tristate "HDPU-Features" 39 tristate "HDPU-Features"
40 help 40 help
41 Select to enable HDPU enhanced features. 41 Select to enable HDPU enhanced features.
@@ -74,6 +74,18 @@ config SANDPOINT
74 Select SANDPOINT if configuring for a Motorola Sandpoint X3 74 Select SANDPOINT if configuring for a Motorola Sandpoint X3
75 (any flavor). 75 (any flavor).
76 76
77config LINKSTATION
78 bool "Linkstation / Kurobox(HG) from Buffalo"
79 select MPIC
80 select FSL_SOC
81 select PPC_UDBG_16550 if SERIAL_8250
82 help
83	  Select LINKSTATION if configuring for one of the PPC-based
84	  (MPC8241) NAS systems from Buffalo Technology. So far only the
85	  KuroboxHG has been tested. In the future the classic Kurobox,
86	  the Linkstation-I HD-HLAN and HD-HGLAN versions, and the
87	  PPC-based Terastation systems should be supported too.
88
77config MPC7448HPC2 89config MPC7448HPC2
78 bool "Freescale MPC7448HPC2(Taiga)" 90 bool "Freescale MPC7448HPC2(Taiga)"
79 select TSI108_BRIDGE 91 select TSI108_BRIDGE
@@ -146,15 +158,6 @@ config PQ2FADS
146 Select PQ2FADS if you wish to configure for a Freescale 158 Select PQ2FADS if you wish to configure for a Freescale
147 PQ2FADS board (-VR or -ZU). 159 PQ2FADS board (-VR or -ZU).
148 160
149config LITE5200
150 bool "Freescale LITE5200 / (IceCube)"
151 select PPC_MPC52xx
152 help
153 Support for the LITE5200 dev board for the MPC5200 from Freescale.
154 This is for the LITE5200 version 2.0 board. Don't know if it changes
155 much but it's only been tested on this board version. I think this
156 board is also known as IceCube.
157
158config EV64360 161config EV64360
159 bool "Marvell-EV64360BP" 162 bool "Marvell-EV64360BP"
160 help 163 help
@@ -172,9 +175,6 @@ config TQM8xxL
172 depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L) 175 depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
173 default y 176 default y
174 177
175config PPC_MPC52xx
176 bool
177
178config 8260 178config 8260
179 bool "CPM2 Support" if WILLOW 179 bool "CPM2 Support" if WILLOW
180 depends on 6xx 180 depends on 6xx
@@ -208,7 +208,7 @@ config PPC_GEN550
208 depends on SANDPOINT || SPRUCE || PPLUS || \ 208 depends on SANDPOINT || SPRUCE || PPLUS || \
209 PRPMC750 || PRPMC800 || LOPEC || \ 209 PRPMC750 || PRPMC800 || LOPEC || \
210 (EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \ 210 (EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
211		   83xx
211		   83xx || LINKSTATION
212 default y 212 default y
213 213
214config FORCE 214config FORCE
@@ -282,13 +282,13 @@ config EPIC_SERIAL_MODE
282 282
283config MPC10X_BRIDGE 283config MPC10X_BRIDGE
284 bool 284 bool
285	depends on POWERPMC250 || LOPEC || SANDPOINT
285	depends on POWERPMC250 || LOPEC || SANDPOINT || LINKSTATION
286 select PPC_INDIRECT_PCI 286 select PPC_INDIRECT_PCI
287 default y 287 default y
288 288
289config MPC10X_OPENPIC 289config MPC10X_OPENPIC
290 bool 290 bool
291	depends on POWERPMC250 || LOPEC || SANDPOINT
291	depends on POWERPMC250 || LOPEC || SANDPOINT || LINKSTATION
292 default y 292 default y
293 293
294config MPC10X_STORE_GATHERING 294config MPC10X_STORE_GATHERING
diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
index fa499fe59291..d3d11a3cd656 100644
--- a/arch/powerpc/platforms/embedded6xx/Makefile
+++ b/arch/powerpc/platforms/embedded6xx/Makefile
@@ -2,3 +2,4 @@
2# Makefile for the 6xx/7xx/7xxxx linux kernel. 2# Makefile for the 6xx/7xx/7xxxx linux kernel.
3# 3#
4obj-$(CONFIG_MPC7448HPC2) += mpc7448_hpc2.o 4obj-$(CONFIG_MPC7448HPC2) += mpc7448_hpc2.o
5obj-$(CONFIG_LINKSTATION) += linkstation.o ls_uart.o
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
new file mode 100644
index 000000000000..61599d919ea8
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -0,0 +1,211 @@
1/*
2 * Board setup routines for the Buffalo Linkstation / Kurobox Platform.
3 *
4 * Copyright (C) 2006 G. Liakhovetski (g.liakhovetski@gmx.de)
5 *
6 * Based on sandpoint.c by Mark A. Greer
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of
10 * any kind, whether express or implied.
11 */
12
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/initrd.h>
16#include <linux/root_dev.h>
17#include <linux/mtd/physmap.h>
18
19#include <asm/time.h>
20#include <asm/prom.h>
21#include <asm/mpic.h>
22#include <asm/mpc10x.h>
23#include <asm/pci-bridge.h>
24
25static struct mtd_partition linkstation_physmap_partitions[] = {
26 {
27 .name = "mtd_firmimg",
28 .offset = 0x000000,
29 .size = 0x300000,
30 },
31 {
32 .name = "mtd_bootcode",
33 .offset = 0x300000,
34 .size = 0x070000,
35 },
36 {
37 .name = "mtd_status",
38 .offset = 0x370000,
39 .size = 0x010000,
40 },
41 {
42 .name = "mtd_conf",
43 .offset = 0x380000,
44 .size = 0x080000,
45 },
46 {
47 .name = "mtd_allflash",
48 .offset = 0x000000,
49 .size = 0x400000,
50 },
51 {
52 .name = "mtd_data",
53 .offset = 0x310000,
54 .size = 0x0f0000,
55 },
56};
57
58static int __init add_bridge(struct device_node *dev)
59{
60 int len;
61 struct pci_controller *hose;
62 int *bus_range;
63
64 printk("Adding PCI host bridge %s\n", dev->full_name);
65
66 bus_range = (int *) get_property(dev, "bus-range", &len);
67 if (bus_range == NULL || len < 2 * sizeof(int))
68 printk(KERN_WARNING "Can't get bus-range for %s, assume"
69 " bus 0\n", dev->full_name);
70
71 hose = pcibios_alloc_controller();
72 if (hose == NULL)
73 return -ENOMEM;
74 hose->first_busno = bus_range ? bus_range[0] : 0;
75 hose->last_busno = bus_range ? bus_range[1] : 0xff;
76 hose->arch_data = dev;
77 setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
78
79 /* Interpret the "ranges" property */
80 /* This also maps the I/O region and sets isa_io/mem_base */
81 pci_process_bridge_OF_ranges(hose, dev, 1);
82
83 return 0;
84}
85
86static void __init linkstation_setup_arch(void)
87{
88 struct device_node *np;
89#ifdef CONFIG_MTD_PHYSMAP
90 physmap_set_partitions(linkstation_physmap_partitions,
91 ARRAY_SIZE(linkstation_physmap_partitions));
92#endif
93
94#ifdef CONFIG_BLK_DEV_INITRD
95 if (initrd_start)
96 ROOT_DEV = Root_RAM0;
97 else
98#endif
99#ifdef CONFIG_ROOT_NFS
100 ROOT_DEV = Root_NFS;
101#else
102 ROOT_DEV = Root_HDA1;
103#endif
104
105 /* Lookup PCI host bridges */
106 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
107 add_bridge(np);
108
109 printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
110 printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
111}
112
113/*
114 * Interrupt setup and service. Interrupts on the linkstation come
115 * from the four PCI slots plus onboard 8241 devices: I2C, DUART.
116 */
117static void __init linkstation_init_IRQ(void)
118{
119 struct mpic *mpic;
120 struct device_node *dnp;
121 void *prop;
122 int size;
123 phys_addr_t paddr;
124
125 dnp = of_find_node_by_type(NULL, "open-pic");
126 if (dnp == NULL)
127 return;
128
129 prop = (struct device_node *)get_property(dnp, "reg", &size);
130 paddr = (phys_addr_t)of_translate_address(dnp, prop);
131
132 mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 4, 32, " EPIC ");
133 BUG_ON(mpic == NULL);
134
135 /* PCI IRQs */
136 mpic_assign_isu(mpic, 0, paddr + 0x10200);
137
138 /* I2C */
139 mpic_assign_isu(mpic, 1, paddr + 0x11000);
140
141 /* ttyS0, ttyS1 */
142 mpic_assign_isu(mpic, 2, paddr + 0x11100);
143
144 mpic_init(mpic);
145}
146
147extern void avr_uart_configure(void);
148extern void avr_uart_send(const char);
149
150static void linkstation_restart(char *cmd)
151{
152 local_irq_disable();
153
154 /* Reset system via AVR */
155 avr_uart_configure();
156 /* Send reboot command */
157 avr_uart_send('C');
158
159 for(;;) /* Spin until reset happens */
160 avr_uart_send('G'); /* "kick" */
161}
162
163static void linkstation_power_off(void)
164{
165 local_irq_disable();
166
167 /* Power down system via AVR */
168 avr_uart_configure();
169 /* send shutdown command */
170 avr_uart_send('E');
171
172 for(;;) /* Spin until power-off happens */
173 avr_uart_send('G'); /* "kick" */
174 /* NOTREACHED */
175}
176
177static void linkstation_halt(void)
178{
179 linkstation_power_off();
180 /* NOTREACHED */
181}
182
183static void linkstation_show_cpuinfo(struct seq_file *m)
184{
185 seq_printf(m, "vendor\t\t: Buffalo Technology\n");
186 seq_printf(m, "machine\t\t: Linkstation I/Kurobox(HG)\n");
187}
188
189static int __init linkstation_probe(void)
190{
191 unsigned long root;
192
193 root = of_get_flat_dt_root();
194
195 if (!of_flat_dt_is_compatible(root, "linkstation"))
196 return 0;
197 return 1;
198}
199
200define_machine(linkstation){
201 .name = "Buffalo Linkstation",
202 .probe = linkstation_probe,
203 .setup_arch = linkstation_setup_arch,
204 .init_IRQ = linkstation_init_IRQ,
205 .show_cpuinfo = linkstation_show_cpuinfo,
206 .get_irq = mpic_get_irq,
207 .restart = linkstation_restart,
208 .power_off = linkstation_power_off,
209 .halt = linkstation_halt,
210 .calibrate_decr = generic_calibrate_decr,
211};
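Note: reset and power-off on these boards are delegated to the onboard AVR microcontroller: one command byte ('C' reboot, 'E' power-off), then an endless stream of 'G' keepalives until the AVR acts. A compilable sketch of the sequence with the UART stubbed out (the command letters are the ones used above; the kernel loops forever, bounded here only so the demo terminates):

#include <stdio.h>

static void avr_uart_send_demo(char c)
{
	putchar(c);		/* stand-in for the real UART transmit */
}

static void linkstation_restart_demo(void)
{
	int kicks = 3;		/* bounded for the demo */

	avr_uart_send_demo('C');	 /* reboot command */
	while (kicks--)
		avr_uart_send_demo('G'); /* "kick" until the AVR resets us */
}

int main(void)
{
	linkstation_restart_demo();
	putchar('\n');
	return 0;
}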
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
new file mode 100644
index 000000000000..0e837762cc5b
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -0,0 +1,131 @@
1#include <linux/workqueue.h>
2#include <linux/string.h>
3#include <linux/delay.h>
4#include <linux/serial_reg.h>
5#include <linux/serial_8250.h>
6#include <asm/io.h>
7#include <asm/mpc10x.h>
8#include <asm/ppc_sys.h>
9#include <asm/prom.h>
10#include <asm/termbits.h>
11
12static void __iomem *avr_addr;
13static unsigned long avr_clock;
14
15static struct work_struct wd_work;
16
17static void wd_stop(struct work_struct *unused)
18{
19 const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
20 int i = 0, rescue = 8;
21 int len = strlen(string);
22
23 while (rescue--) {
24 int j;
25 char lsr = in_8(avr_addr + UART_LSR);
26
27 if (lsr & (UART_LSR_THRE | UART_LSR_TEMT)) {
28 for (j = 0; j < 16 && i < len; j++, i++)
29 out_8(avr_addr + UART_TX, string[i]);
30 if (i == len) {
31 /* Read "OK" back: 4ms for the last "KKKK"
32 plus a couple bytes back */
33 msleep(7);
34 printk("linkstation: disarming the AVR watchdog: ");
35 while (in_8(avr_addr + UART_LSR) & UART_LSR_DR)
36 printk("%c", in_8(avr_addr + UART_RX));
37 break;
38 }
39 }
40 msleep(17);
41 }
42 printk("\n");
43}
44
45#define AVR_QUOT(clock) (((clock) + 8 * 9600) / (16 * 9600))
46
47void avr_uart_configure(void)
48{
49 unsigned char cval = UART_LCR_WLEN8;
50 unsigned int quot = AVR_QUOT(avr_clock);
51
52 if (!avr_addr || !avr_clock)
53 return;
54
55 out_8(avr_addr + UART_LCR, cval); /* initialise UART */
56 out_8(avr_addr + UART_MCR, 0);
57 out_8(avr_addr + UART_IER, 0);
58
59 cval |= UART_LCR_STOP | UART_LCR_PARITY | UART_LCR_EPAR;
60
61 out_8(avr_addr + UART_LCR, cval); /* Set character format */
62
63 out_8(avr_addr + UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
64 out_8(avr_addr + UART_DLL, quot & 0xff); /* LS of divisor */
65 out_8(avr_addr + UART_DLM, quot >> 8); /* MS of divisor */
66 out_8(avr_addr + UART_LCR, cval); /* reset DLAB */
67 out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO); /* enable FIFO */
68}
69
70void avr_uart_send(const char c)
71{
72 if (!avr_addr || !avr_clock)
73 return;
74
75 out_8(avr_addr + UART_TX, c);
76 out_8(avr_addr + UART_TX, c);
77 out_8(avr_addr + UART_TX, c);
78 out_8(avr_addr + UART_TX, c);
79}
80
81static void __init ls_uart_init(void)
82{
83 local_irq_disable();
84
85#ifndef CONFIG_SERIAL_8250
86 out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO); /* enable FIFO */
87 out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO |
88 UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); /* clear FIFOs */
89 out_8(avr_addr + UART_FCR, 0);
90 out_8(avr_addr + UART_IER, 0);
91
92 /* Clear up interrupts */
93 (void) in_8(avr_addr + UART_LSR);
94 (void) in_8(avr_addr + UART_RX);
95 (void) in_8(avr_addr + UART_IIR);
96 (void) in_8(avr_addr + UART_MSR);
97#endif
98 avr_uart_configure();
99
100 local_irq_enable();
101}
102
103static int __init ls_uarts_init(void)
104{
105 struct device_node *avr;
106 phys_addr_t phys_addr;
107 int len;
108
109 avr = of_find_node_by_path("/soc10x/serial@80004500");
110 if (!avr)
111 return -EINVAL;
112
113 avr_clock = *(u32*)get_property(avr, "clock-frequency", &len);
114 phys_addr = ((u32*)get_property(avr, "reg", &len))[0];
115
116 if (!avr_clock || !phys_addr)
117 return -EINVAL;
118
119 avr_addr = ioremap(phys_addr, 32);
120 if (!avr_addr)
121 return -EFAULT;
122
123 ls_uart_init();
124
125 INIT_WORK(&wd_work, wd_stop);
126 schedule_work(&wd_work);
127
128 return 0;
129}
130
131late_initcall(ls_uarts_init);
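Note: AVR_QUOT() is the usual round-to-nearest UART divisor for 9600 baud — adding half a divisor unit (8 * 9600) before the truncating division by 16 * 9600 rounds to the nearest integer. A compilable check with a common 7.3728 MHz UART clock (an illustrative value, not necessarily this board's):

#include <stdio.h>

#define BAUD 9600
#define AVR_QUOT_DEMO(clock) (((clock) + 8 * BAUD) / (16 * BAUD))

int main(void)
{
	unsigned long clock = 7372800;	/* illustrative 7.3728 MHz clock */

	/* (7372800 + 76800) / 153600 = 48 -> exactly 9600 baud */
	printf("divisor = %lu\n", AVR_QUOT_DEMO(clock));
	return 0;
}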
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index bdb475c65cba..3fcc85f60fbf 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -60,7 +60,7 @@ pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET;
60 60
61extern int tsi108_setup_pci(struct device_node *dev); 61extern int tsi108_setup_pci(struct device_node *dev);
62extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); 62extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
63extern void tsi108_pci_int_init(void);
63extern void tsi108_pci_int_init(struct device_node *node);
64extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc); 64extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
65 65
66int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn) 66int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
@@ -71,65 +71,6 @@ int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
71 return PCIBIOS_SUCCESSFUL; 71 return PCIBIOS_SUCCESSFUL;
72} 72}
73 73
74/*
75 * find pci slot by devfn in interrupt map of OF tree
76 */
77u8 find_slot_by_devfn(unsigned int *interrupt_map, unsigned int devfn)
78{
79 int i;
80 unsigned int tmp;
81 for (i = 0; i < 4; i++){
82 tmp = interrupt_map[i*4*7];
83 if ((tmp >> 11) == (devfn >> 3))
84 return i;
85 }
86 return i;
87}
88
89/*
90 * Scans the interrupt map for pci device
91 */
92void mpc7448_hpc2_fixup_irq(struct pci_dev *dev)
93{
94 struct pci_controller *hose;
95 struct device_node *node;
96 const unsigned int *interrupt;
97 int busnr;
98 int len;
99 u8 slot;
100 u8 pin;
101
102 /* Lookup the hose */
103 busnr = dev->bus->number;
104 hose = pci_bus_to_hose(busnr);
105 if (!hose)
106 printk(KERN_ERR "No pci hose found\n");
107
108 /* Check it has an OF node associated */
109 node = (struct device_node *) hose->arch_data;
110 if (!node)
111 printk(KERN_ERR "No pci node found\n");
112
113 interrupt = get_property(node, "interrupt-map", &len);
114 slot = find_slot_by_devfn(interrupt, dev->devfn);
115 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
116 if (pin == 0 || pin > 4)
117 pin = 1;
118 pin--;
119 dev->irq = interrupt[slot*4*7 + pin*7 + 5];
120 DBG("TSI_PCI: dev->irq = 0x%x\n", dev->irq);
121}
122/* temporary pci irq map fixup*/
123
124void __init mpc7448_hpc2_pcibios_fixup(void)
125{
126 struct pci_dev *dev = NULL;
127 for_each_pci_dev(dev) {
128 mpc7448_hpc2_fixup_irq(dev);
129 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
130 }
131}
132
133static void __init mpc7448_hpc2_setup_arch(void) 74static void __init mpc7448_hpc2_setup_arch(void)
134{ 75{
135 struct device_node *cpu; 76 struct device_node *cpu;
@@ -192,9 +133,12 @@ static void __init mpc7448_hpc2_init_IRQ(void)
192{ 133{
193 struct mpic *mpic; 134 struct mpic *mpic;
194 phys_addr_t mpic_paddr = 0; 135 phys_addr_t mpic_paddr = 0;
136 struct device_node *tsi_pic;
137#ifdef CONFIG_PCI
195 unsigned int cascade_pci_irq; 138 unsigned int cascade_pci_irq;
196 struct device_node *tsi_pci; 139 struct device_node *tsi_pci;
197	struct device_node *tsi_pic;
140	struct device_node *cascade_node = NULL;
141#endif
198 142
199 tsi_pic = of_find_node_by_type(NULL, "open-pic"); 143 tsi_pic = of_find_node_by_type(NULL, "open-pic");
200 if (tsi_pic) { 144 if (tsi_pic) {
@@ -208,31 +152,41 @@ static void __init mpc7448_hpc2_init_IRQ(void)
208 return; 152 return;
209 } 153 }
210 154
211 DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__, 155 DBG("%s: tsi108 pic phys_addr = 0x%x\n", __FUNCTION__,
212 (u32) mpic_paddr); 156 (u32) mpic_paddr);
213 157
214 mpic = mpic_alloc(tsi_pic, mpic_paddr, 158 mpic = mpic_alloc(tsi_pic, mpic_paddr,
215 MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET | 159 MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
216 MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108, 160 MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
217			0, /* num_sources used */
218			0, /* num_sources used */
161			24,
162			NR_IRQS-4, /* num_sources used */
219 "Tsi108_PIC"); 163 "Tsi108_PIC");
220 164
221	BUG_ON(mpic == NULL); /* XXXX */
165	BUG_ON(mpic == NULL);
166
167 mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
168
222 mpic_init(mpic); 169 mpic_init(mpic);
223 170
171#ifdef CONFIG_PCI
224 tsi_pci = of_find_node_by_type(NULL, "pci"); 172 tsi_pci = of_find_node_by_type(NULL, "pci");
225	if (tsi_pci == 0) {
173	if (tsi_pci == NULL) {
226 printk("%s: No tsi108 pci node found !\n", __FUNCTION__); 174 printk("%s: No tsi108 pci node found !\n", __FUNCTION__);
227 return; 175 return;
228 } 176 }
177 cascade_node = of_find_node_by_type(NULL, "pic-router");
178 if (cascade_node == NULL) {
179 printk("%s: No tsi108 pci cascade node found !\n", __FUNCTION__);
180 return;
181 }
229 182
230 cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); 183 cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
184 DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __FUNCTION__,
185 (u32) cascade_pci_irq);
186 tsi108_pci_int_init(cascade_node);
231 set_irq_data(cascade_pci_irq, mpic); 187 set_irq_data(cascade_pci_irq, mpic);
232 set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); 188 set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
233
189#endif
234 tsi108_pci_int_init();
235
236 /* Configure MPIC outputs to CPU0 */ 190 /* Configure MPIC outputs to CPU0 */
237 tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); 191 tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
238 of_node_put(tsi_pic); 192 of_node_put(tsi_pic);
@@ -290,7 +244,6 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
290 return 1; 244 return 1;
291 } 245 }
292 return 0; 246 return 0;
293
294} 247}
295 248
296define_machine(mpc7448_hpc2){ 249define_machine(mpc7448_hpc2){
@@ -300,7 +253,6 @@ define_machine(mpc7448_hpc2){
300 .init_IRQ = mpc7448_hpc2_init_IRQ, 253 .init_IRQ = mpc7448_hpc2_init_IRQ,
301 .show_cpuinfo = mpc7448_hpc2_show_cpuinfo, 254 .show_cpuinfo = mpc7448_hpc2_show_cpuinfo,
302 .get_irq = mpic_get_irq, 255 .get_irq = mpic_get_irq,
303 .pcibios_fixup = mpc7448_hpc2_pcibios_fixup,
304 .restart = mpc7448_hpc2_restart, 256 .restart = mpc7448_hpc2_restart,
305 .calibrate_decr = generic_calibrate_decr, 257 .calibrate_decr = generic_calibrate_decr,
306 .machine_check_exception= mpc7448_machine_check_exception, 258 .machine_check_exception= mpc7448_machine_check_exception,
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index 887b68804e6d..54e6b3b6f261 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -31,5 +31,5 @@ endmenu
31 31
32config VIOPATH 32config VIOPATH
33 bool 33 bool
34	depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
34	depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || ISERIES_VETH
35 default y 35 default y
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
index dee4eb4d8bec..13ac3015d91c 100644
--- a/arch/powerpc/platforms/iseries/Makefile
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -1,5 +1,7 @@
1EXTRA_CFLAGS += -mno-minimal-toc 1EXTRA_CFLAGS += -mno-minimal-toc
2 2
3extra-y += dt.o
4
3obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt_mod.o mf.o lpevents.o \ 5obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt_mod.o mf.o lpevents.o \
4 hvcall.o proc.o htab.o iommu.o misc.o irq.o 6 hvcall.o proc.o htab.o iommu.o misc.o irq.o
5obj-$(CONFIG_PCI) += pci.o vpdinfo.o 7obj-$(CONFIG_PCI) += pci.o vpdinfo.o
@@ -7,5 +9,9 @@ obj-$(CONFIG_SMP) += smp.o
7obj-$(CONFIG_VIOPATH) += viopath.o 9obj-$(CONFIG_VIOPATH) += viopath.o
8obj-$(CONFIG_MODULES) += ksyms.o 10obj-$(CONFIG_MODULES) += ksyms.o
9 11
12quiet_cmd_dt_strings = DT_STR $@
13 cmd_dt_strings = $(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings \
14 $< $@
15
10$(obj)/dt_mod.o: $(obj)/dt.o 16$(obj)/dt_mod.o: $(obj)/dt.o
11	@$(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings $(obj)/dt.o $(obj)/dt_mod.o
17	$(call if_changed,dt_strings)
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index e305deee7f44..9e8a334a518a 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -41,6 +41,7 @@
 #include "call_pci.h"
 #include "pci.h"
 #include "it_exp_vpd_panel.h"
+#include "naca.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -205,13 +206,11 @@ static void __init dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
 	dt_prop(dt, name, &data, sizeof(u32));
 }
 
-#ifdef notyet
 static void __init dt_prop_u64(struct iseries_flat_dt *dt, const char *name,
 		u64 data)
 {
 	dt_prop(dt, name, &data, sizeof(u64));
 }
-#endif
 
 static void __init dt_prop_u64_list(struct iseries_flat_dt *dt,
 		const char *name, u64 *data, int n)
@@ -306,6 +305,17 @@ static void __init dt_model(struct iseries_flat_dt *dt)
 	dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
 }
 
+static void __init dt_initrd(struct iseries_flat_dt *dt)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (naca.xRamDisk) {
+		dt_prop_u64(dt, "linux,initrd-start", (u64)naca.xRamDisk);
+		dt_prop_u64(dt, "linux,initrd-end",
+			(u64)naca.xRamDisk + naca.xRamDiskSize * HW_PAGE_SIZE);
+	}
+#endif
+}
+
 static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
 		const char *name, u32 reg, int unit,
 		const char *type, const char *compat, int end)
@@ -641,6 +651,7 @@ void * __init build_flat_dt(unsigned long phys_mem_size)
 	/* /chosen */
 	dt_start_node(iseries_dt, "chosen");
 	dt_prop_str(iseries_dt, "bootargs", cmd_line);
+	dt_initrd(iseries_dt);
 	dt_end_node(iseries_dt);
 
 	dt_cpus(iseries_dt);
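
These dt.c hunks move initrd discovery into the flattened device tree: the hypervisor-provided RAM-disk location is now published as linux,initrd-start/linux,initrd-end under /chosen, where the generic early-boot code picks it up instead of the platform poking kernel globals directly. A minimal sketch of that consumer side, assuming the usual flat-device-tree accessor; the surrounding function is an illustrative stand-in, not a quote of the generic walker:

    /* Sketch: how a /chosen consumer recovers the initrd range. */
    static void __init sketch_check_for_initrd(unsigned long node)
    {
    	const u64 *prop;

    	prop = of_get_flat_dt_prop(node, "linux,initrd-start", NULL);
    	if (prop == NULL)
    		return;		/* no initrd advertised in /chosen */
    	initrd_start = (unsigned long)__va(*prop);

    	prop = of_get_flat_dt_prop(node, "linux,initrd-end", NULL);
    	if (prop)
    		initrd_end = (unsigned long)__va(*prop);
    }

The matching producer-side removal is visible in iseries/setup.c further down, where the old CONFIG_BLK_DEV_INITRD block disappears from iSeries_init_early().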
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 218817d13c5c..d7a756d5135c 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
+#include <linux/pci.h>
 
 #include <asm/iommu.h>
 #include <asm/tce.h>
@@ -114,12 +115,10 @@ void iommu_table_getparms_iSeries(unsigned long busno,
 {
 	struct iommu_table_cb *parms;
 
-	parms = kmalloc(sizeof(*parms), GFP_KERNEL);
+	parms = kzalloc(sizeof(*parms), GFP_KERNEL);
 	if (parms == NULL)
 		panic("PCI_DMA: TCE Table Allocation failed.");
 
-	memset(parms, 0, sizeof(*parms));
-
 	parms->itc_busno = busno;
 	parms->itc_slotno = slotno;
 	parms->itc_virtbus = virtbus;
@@ -168,7 +167,7 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
 }
 
 
-void iommu_devnode_init_iSeries(struct device_node *dn)
+void iommu_devnode_init_iSeries(struct pci_dev *pdev, struct device_node *dn)
 {
 	struct iommu_table *tbl;
 	struct pci_dn *pdn = PCI_DN(dn);
@@ -186,19 +185,14 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
 		pdn->iommu_table = iommu_init_table(tbl, -1);
 	else
 		kfree(tbl);
+	pdev->dev.archdata.dma_data = pdn->iommu_table;
 }
 #endif
 
-static void iommu_dev_setup_iSeries(struct pci_dev *dev) { }
-static void iommu_bus_setup_iSeries(struct pci_bus *bus) { }
-
 void iommu_init_early_iSeries(void)
 {
 	ppc_md.tce_build = tce_build_iSeries;
 	ppc_md.tce_free  = tce_free_iSeries;
 
-	ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries;
-	ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries;
-
-	pci_iommu_init();
+	pci_dma_ops = &dma_iommu_ops;
 }
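
The kzalloc() conversion here (repeated in viopath.c below) is purely mechanical: a kmalloc() followed by a full memset() of the same object is exactly what kzalloc() does in one call. A minimal illustration, with a hypothetical structure standing in for iommu_table_cb:

    #include <linux/slab.h>

    struct demo {			/* hypothetical stand-in */
    	unsigned long busno;
    	unsigned long slotno;
    };

    static struct demo *demo_alloc(void)
    {
    	/* Before: p = kmalloc(sizeof(*p), GFP_KERNEL);
    	 *         memset(p, 0, sizeof(*p));                 */
    	struct demo *p = kzalloc(sizeof(*p), GFP_KERNEL);	/* zeroed */

    	if (!p)
    		return NULL;
    	p->busno = 1;	/* only meaningful fields set explicitly */
    	return p;
    }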
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
index a2200842f4e5..2430848b98e7 100644
--- a/arch/powerpc/platforms/iseries/ksyms.c
+++ b/arch/powerpc/platforms/iseries/ksyms.c
@@ -19,9 +19,3 @@ EXPORT_SYMBOL(HvCall4);
 EXPORT_SYMBOL(HvCall5);
 EXPORT_SYMBOL(HvCall6);
 EXPORT_SYMBOL(HvCall7);
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(local_get_flags);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_restore);
-#endif
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
index e3e929e1b460..c1f4502a3c6a 100644
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -17,6 +17,7 @@
 
 #include <asm/system.h>
 #include <asm/paca.h>
+#include <asm/firmware.h>
 #include <asm/iseries/it_lp_queue.h>
 #include <asm/iseries/hv_lp_event.h>
 #include <asm/iseries/hv_call_event.h>
@@ -318,6 +319,9 @@ static int __init proc_lpevents_init(void)
 {
 	struct proc_dir_entry *e;
 
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		return 0;
+
 	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
 	if (e)
 		e->proc_fops = &proc_lpevents_operations;
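
This firmware_has_feature() guard recurs across the iSeries changes in this series (mf.c, proc.c and viopath.c below get the same treatment). The reason is combined multiplatform kernels: an initcall linked into the image runs on every machine, so each iSeries-only initcall must bail out early when the firmware feature is absent. A minimal sketch of the pattern:

    #include <linux/init.h>
    #include <asm/firmware.h>

    static int __init example_iseries_init(void)
    {
    	/* Built into a multiplatform kernel this runs everywhere;
    	 * do nothing unless iSeries firmware is actually present. */
    	if (!firmware_has_feature(FW_FEATURE_ISERIES))
    		return 0;

    	/* ... iSeries-only setup goes here ... */
    	return 0;
    }
    __initcall(example_iseries_init);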
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index b5737d68d6c4..1ad0e4aaad1a 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -38,6 +38,7 @@
 #include <asm/uaccess.h>
 #include <asm/paca.h>
 #include <asm/abs_addr.h>
+#include <asm/firmware.h>
 #include <asm/iseries/vio.h>
 #include <asm/iseries/mf.h>
 #include <asm/iseries/hv_lp_config.h>
@@ -1178,7 +1179,7 @@ static ssize_t proc_mf_change_vmlinux(struct file *file,
 		const char __user *buf,
 		size_t count, loff_t *ppos)
 {
-	struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
 	ssize_t rc;
 	dma_addr_t dma_addr;
 	char *page;
@@ -1235,6 +1236,9 @@ static int __init mf_proc_init(void)
 	char name[2];
 	int i;
 
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		return 0;
+
 	mf_proc_root = proc_mkdir("iSeries/mf", NULL);
 	if (!mf_proc_root)
 		return 1;
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
index 7641fc7e550a..2c6ff0fdac98 100644
--- a/arch/powerpc/platforms/iseries/misc.S
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -19,39 +19,8 @@
 
 	.text
 
-/* unsigned long local_save_flags(void) */
-_GLOBAL(local_get_flags)
-	lbz	r3,PACAPROCENABLED(r13)
-	blr
-
-/* unsigned long local_irq_disable(void) */
-_GLOBAL(local_irq_disable)
-	lbz	r3,PACAPROCENABLED(r13)
-	li	r4,0
-	stb	r4,PACAPROCENABLED(r13)
-	blr			/* Done */
-
-/* void local_irq_restore(unsigned long flags) */
-_GLOBAL(local_irq_restore)
-	lbz	r5,PACAPROCENABLED(r13)
-	/* Check if things are setup the way we want _already_. */
-	cmpw	0,r3,r5
-	beqlr
-	/* are we enabling interrupts? */
-	cmpdi	0,r3,0
-	stb	r3,PACAPROCENABLED(r13)
-	beqlr
-	/* Check pending interrupts */
-	/*   A decrementer, IPI or PMC interrupt may have occurred
-	 * while we were in the hypervisor (which enables) */
-	ld	r4,PACALPPACAPTR(r13)
-	ld	r4,LPPACAANYINT(r4)
-	cmpdi	r4,0
-	beqlr
-
-	/*
-	 * Handle pending interrupts in interrupt context
-	 */
+/* Handle pending interrupts in interrupt context */
+_GLOBAL(iseries_handle_interrupts)
 	li	r0,0x5555
 	sc
 	blr
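
The deleted helpers implemented iSeries' private soft-disable of interrupts: local_irq_disable() only cleared a flag in the PACA, and local_irq_restore() checked the lppaca for interrupts that arrived while "disabled" and replayed them through hypercall 0x5555. That logic now lives in the generic powerpc lazy-interrupt code, and only the replay stub survives, renamed iseries_handle_interrupts (which also explains the ksyms.c hunk above). For reference, a C rendering of what the removed assembler did; the struct is a simplified stand-in named after the asm offsets, not the real PACA layout:

    struct sketch_paca {
    	unsigned char proc_enabled;	/* PACAPROCENABLED */
    	unsigned long lppaca_any_int;	/* LPPACAANYINT */
    };

    static struct sketch_paca paca;

    static void iseries_handle_interrupts(void)
    {
    	/* stand-in for the "li r0,0x5555; sc" replay hypercall */
    }

    static void sketch_local_irq_restore(unsigned long en)
    {
    	if (paca.proc_enabled == en)	/* already in requested state */
    		return;
    	paca.proc_enabled = en;
    	if (!en)			/* disabling: flag only, no hcall */
    		return;
    	/* Re-enabling: a decrementer, IPI or PMC interrupt may have
    	 * arrived while soft-disabled (the hypervisor enables). */
    	if (paca.lppaca_any_int)
    		iseries_handle_interrupts();
    }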
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 4aa165e010d9..4a06d9c34986 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -156,53 +156,6 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
 }
 
 /*
- * iSeries_pcibios_init
- *
- * Description:
- * This function checks for all possible system PCI host bridges that connect
- * PCI buses.  The system hypervisor is queried as to the guest partition
- * ownership status.  A pci_controller is built for any bus which is partially
- * owned or fully owned by this guest partition.
- */
-void iSeries_pcibios_init(void)
-{
-	struct pci_controller *phb;
-	struct device_node *root = of_find_node_by_path("/");
-	struct device_node *node = NULL;
-
-	if (root == NULL) {
-		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
-				"of device tree\n");
-		return;
-	}
-	while ((node = of_get_next_child(root, node)) != NULL) {
-		HvBusNumber bus;
-		const u32 *busp;
-
-		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
-			continue;
-
-		busp = get_property(node, "bus-range", NULL);
-		if (busp == NULL)
-			continue;
-		bus = *busp;
-		printk("bus %d appears to exist\n", bus);
-		phb = pcibios_alloc_controller(node);
-		if (phb == NULL)
-			continue;
-
-		phb->pci_mem_offset = phb->local_number = bus;
-		phb->first_busno = bus;
-		phb->last_busno = bus;
-		phb->ops = &iSeries_pci_ops;
-	}
-
-	of_node_put(root);
-
-	pci_devs_phb_init();
-}
-
-/*
  * iSeries_pci_final_fixup(void)
  */
 void __init iSeries_pci_final_fixup(void)
@@ -253,7 +206,7 @@ void __init iSeries_pci_final_fixup(void)
 		PCI_DN(node)->pcidev = pdev;
 		allocate_device_bars(pdev);
 		iSeries_Device_Information(pdev, DeviceCount);
-		iommu_devnode_init_iSeries(node);
+		iommu_devnode_init_iSeries(pdev, node);
 	} else
 		printk("PCI: Device Tree not found for 0x%016lX\n",
 				(unsigned long)pdev);
@@ -438,11 +391,7 @@ static inline struct device_node *xlate_iomm_address(
 /*
  * Read MM I/O Instructions for the iSeries
  * On MM I/O error, all ones are returned and iSeries_pci_IoError is cal
- * else, data is returned in big Endian format.
- *
- * iSeries_Read_Byte = Read Byte  ( 8 bit)
- * iSeries_Read_Word = Read Word (16 bit)
- * iSeries_Read_Long = Read Long (32 bit)
+ * else, data is returned in Big Endian format.
  */
 static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
 {
@@ -462,14 +411,15 @@ static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
 		num_printed = 0;
 	}
 	if (num_printed++ < 10)
-		printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
+		printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n",
+		       IoAddress);
 		return 0xff;
 	}
 	do {
 		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
 	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
 
-	return (u8)ret.value;
+	return ret.value;
 }
 
 static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
@@ -490,7 +440,8 @@ static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
 		num_printed = 0;
 	}
 	if (num_printed++ < 10)
-		printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
+		printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n",
+		       IoAddress);
 		return 0xffff;
 	}
 	do {
@@ -498,7 +449,7 @@ static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
 				BarOffset, 0);
 	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
 
-	return swab16((u16)ret.value);
+	return ret.value;
 }
 
 static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
@@ -519,7 +470,8 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
 		num_printed = 0;
 	}
 	if (num_printed++ < 10)
-		printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
+		printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n",
+		       IoAddress);
 		return 0xffffffff;
 	}
 	do {
@@ -527,15 +479,12 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
 				BarOffset, 0);
 	} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
 
-	return swab32((u32)ret.value);
+	return ret.value;
 }
 
 /*
  * Write MM I/O Instructions for the iSeries
  *
- * iSeries_Write_Byte = Write Byte (8 bit)
- * iSeries_Write_Word = Write Word(16 bit)
- * iSeries_Write_Long = Write Long(32 bit)
  */
 static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
 {
@@ -581,11 +530,12 @@ static void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
 		num_printed = 0;
 	}
 	if (num_printed++ < 10)
-		printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
+		printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n",
+		       IoAddress);
 		return;
 	}
 	do {
-		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
+		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, data, 0);
 	} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
 }
 
@@ -607,231 +557,221 @@ static void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
 		num_printed = 0;
 	}
 	if (num_printed++ < 10)
-		printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
+		printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n",
+		       IoAddress);
 		return;
 	}
 	do {
-		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
+		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, data, 0);
 	} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
 }
 
-extern unsigned char __raw_readb(const volatile void __iomem *addr)
+static u8 iseries_readb(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned char __force *)addr;
+	return iSeries_Read_Byte(addr);
 }
-EXPORT_SYMBOL(__raw_readb);
 
-extern unsigned short __raw_readw(const volatile void __iomem *addr)
+static u16 iseries_readw(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned short __force *)addr;
+	return le16_to_cpu(iSeries_Read_Word(addr));
 }
-EXPORT_SYMBOL(__raw_readw);
 
-extern unsigned int __raw_readl(const volatile void __iomem *addr)
+static u32 iseries_readl(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned int __force *)addr;
+	return le32_to_cpu(iSeries_Read_Long(addr));
 }
-EXPORT_SYMBOL(__raw_readl);
 
-extern unsigned long __raw_readq(const volatile void __iomem *addr)
+static u16 iseries_readw_be(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned long __force *)addr;
+	return iSeries_Read_Word(addr);
 }
-EXPORT_SYMBOL(__raw_readq);
 
-extern void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+static u32 iseries_readl_be(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned char __force *)addr = v;
+	return iSeries_Read_Long(addr);
 }
-EXPORT_SYMBOL(__raw_writeb);
 
-extern void __raw_writew(unsigned short v, volatile void __iomem *addr)
+static void iseries_writeb(u8 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned short __force *)addr = v;
+	iSeries_Write_Byte(data, addr);
 }
-EXPORT_SYMBOL(__raw_writew);
 
-extern void __raw_writel(unsigned int v, volatile void __iomem *addr)
+static void iseries_writew(u16 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned int __force *)addr = v;
+	iSeries_Write_Word(cpu_to_le16(data), addr);
 }
-EXPORT_SYMBOL(__raw_writel);
 
-extern void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+static void iseries_writel(u32 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned long __force *)addr = v;
+	iSeries_Write_Long(cpu_to_le32(data), addr);
 }
-EXPORT_SYMBOL(__raw_writeq);
 
-int in_8(const volatile unsigned char __iomem *addr)
+static void iseries_writew_be(u16 data, volatile void __iomem *addr)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Byte(addr);
-	return __in_8(addr);
+	iSeries_Write_Word(data, addr);
 }
-EXPORT_SYMBOL(in_8);
 
-void out_8(volatile unsigned char __iomem *addr, int val)
+static void iseries_writel_be(u32 data, volatile void __iomem *addr)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Byte(val, addr);
-	else
-		__out_8(addr, val);
+	iSeries_Write_Long(data, addr);
 }
-EXPORT_SYMBOL(out_8);
 
-int in_le16(const volatile unsigned short __iomem *addr)
+static void iseries_readsb(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Word(addr);
-	return __in_le16(addr);
+	u8 *dst = buf;
+	while(count-- > 0)
+		*(dst++) = iSeries_Read_Byte(addr);
 }
-EXPORT_SYMBOL(in_le16);
 
-int in_be16(const volatile unsigned short __iomem *addr)
+static void iseries_readsw(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return __in_be16(addr);
+	u16 *dst = buf;
+	while(count-- > 0)
+		*(dst++) = iSeries_Read_Word(addr);
 }
-EXPORT_SYMBOL(in_be16);
 
-void out_le16(volatile unsigned short __iomem *addr, int val)
+static void iseries_readsl(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Word(val, addr);
-	else
-		__out_le16(addr, val);
+	u32 *dst = buf;
+	while(count-- > 0)
+		*(dst++) = iSeries_Read_Long(addr);
 }
-EXPORT_SYMBOL(out_le16);
 
-void out_be16(volatile unsigned short __iomem *addr, int val)
+static void iseries_writesb(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_be16(addr, val);
+	const u8 *src = buf;
+	while(count-- > 0)
+		iSeries_Write_Byte(*(src++), addr);
 }
-EXPORT_SYMBOL(out_be16);
 
-unsigned in_le32(const volatile unsigned __iomem *addr)
+static void iseries_writesw(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Long(addr);
-	return __in_le32(addr);
+	const u16 *src = buf;
+	while(count-- > 0)
+		iSeries_Write_Word(*(src++), addr);
 }
-EXPORT_SYMBOL(in_le32);
 
-unsigned in_be32(const volatile unsigned __iomem *addr)
+static void iseries_writesl(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return __in_be32(addr);
+	const u32 *src = buf;
+	while(count-- > 0)
+		iSeries_Write_Long(*(src++), addr);
 }
-EXPORT_SYMBOL(in_be32);
 
-void out_le32(volatile unsigned __iomem *addr, int val)
+static void iseries_memset_io(volatile void __iomem *addr, int c,
+			      unsigned long n)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Long(val, addr);
-	else
-		__out_le32(addr, val);
-}
-EXPORT_SYMBOL(out_le32);
+	volatile char __iomem *d = addr;
 
-void out_be32(volatile unsigned __iomem *addr, int val)
-{
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_be32(addr, val);
+	while (n-- > 0)
+		iSeries_Write_Byte(c, d++);
 }
-EXPORT_SYMBOL(out_be32);
 
-unsigned long in_le64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
+				  unsigned long n)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+	char *d = dest;
+	const volatile char __iomem *s = src;
 
-	return __in_le64(addr);
+	while (n-- > 0)
+		*d++ = iSeries_Read_Byte(s++);
 }
-EXPORT_SYMBOL(in_le64);
 
-unsigned long in_be64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
+				unsigned long n)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+	const char *s = src;
+	volatile char __iomem *d = dest;
 
-	return __in_be64(addr);
+	while (n-- > 0)
+		iSeries_Write_Byte(*s++, d++);
 }
-EXPORT_SYMBOL(in_be64);
-
-void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_le64(addr, val);
-}
-EXPORT_SYMBOL(out_le64);
-
-void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_be64(addr, val);
-}
-EXPORT_SYMBOL(out_be64);
-
-void memset_io(volatile void __iomem *addr, int c, unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		volatile char __iomem *d = addr;
-
-		while (n-- > 0) {
-			iSeries_Write_Byte(c, d++);
-		}
-	} else
-		eeh_memset_io(addr, c, n);
-}
-EXPORT_SYMBOL(memset_io);
-
-void memcpy_fromio(void *dest, const volatile void __iomem *src,
-		unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		char *d = dest;
-		const volatile char __iomem *s = src;
-
-		while (n-- > 0) {
-			*d++ = iSeries_Read_Byte(s++);
-		}
-	} else
-		eeh_memcpy_fromio(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy_fromio);
-
-void memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		const char *s = src;
-		volatile char __iomem *d = dest;
-
-		while (n-- > 0) {
-			iSeries_Write_Byte(*s++, d++);
-		}
-	} else
-		eeh_memcpy_toio(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy_toio);
 
+/* We only set MMIO ops. The default PIO ops will be default
+ * to the MMIO ops + pci_io_base which is 0 on iSeries as
+ * expected so both should work.
+ *
+ * Note that we don't implement the readq/writeq versions as
+ * I don't know of an HV call for doing so. Thus, the default
+ * operation will be used instead, which will fault a the value
+ * return by iSeries for MMIO addresses always hits a non mapped
+ * area. This is as good as the BUG() we used to have there.
+ */
+static struct ppc_pci_io __initdata iseries_pci_io = {
+	.readb = iseries_readb,
+	.readw = iseries_readw,
+	.readl = iseries_readl,
+	.readw_be = iseries_readw_be,
+	.readl_be = iseries_readl_be,
+	.writeb = iseries_writeb,
+	.writew = iseries_writew,
+	.writel = iseries_writel,
+	.writew_be = iseries_writew_be,
+	.writel_be = iseries_writel_be,
+	.readsb = iseries_readsb,
+	.readsw = iseries_readsw,
+	.readsl = iseries_readsl,
+	.writesb = iseries_writesb,
+	.writesw = iseries_writesw,
+	.writesl = iseries_writesl,
+	.memset_io = iseries_memset_io,
+	.memcpy_fromio = iseries_memcpy_fromio,
+	.memcpy_toio = iseries_memcpy_toio,
+};
+
+/*
+ * iSeries_pcibios_init
+ *
+ * Description:
+ * This function checks for all possible system PCI host bridges that connect
+ * PCI buses.  The system hypervisor is queried as to the guest partition
+ * ownership status.  A pci_controller is built for any bus which is partially
+ * owned or fully owned by this guest partition.
+ */
+void __init iSeries_pcibios_init(void)
+{
+	struct pci_controller *phb;
+	struct device_node *root = of_find_node_by_path("/");
+	struct device_node *node = NULL;
+
+	/* Install IO hooks */
+	ppc_pci_io = iseries_pci_io;
+
+	if (root == NULL) {
+		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
+				"of device tree\n");
+		return;
+	}
+	while ((node = of_get_next_child(root, node)) != NULL) {
+		HvBusNumber bus;
+		const u32 *busp;
+
+		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
+			continue;
+
+		busp = get_property(node, "bus-range", NULL);
+		if (busp == NULL)
+			continue;
+		bus = *busp;
+		printk("bus %d appears to exist\n", bus);
+		phb = pcibios_alloc_controller(node);
+		if (phb == NULL)
+			continue;
+
+		phb->pci_mem_offset = phb->local_number = bus;
+		phb->first_busno = bus;
+		phb->last_busno = bus;
+		phb->ops = &iSeries_pci_ops;
+	}
+
+	of_node_put(root);
+
+	pci_devs_phb_init();
+}
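
The net effect of this large rewrite is to stop overriding the kernel's I/O accessors at link time (the old __raw_read*/in_*/out_*/memcpy_toio definitions with their BUG_ON and firmware checks) and instead register hypercall-backed accessors once, through the ppc_pci_io operations table that iSeries_pcibios_init() installs. A reduced sketch of that indirection pattern; struct io_ops here is a simplified stand-in for ppc_pci_io, with a single accessor:

    struct io_ops {
    	unsigned char (*readb)(const volatile void *addr);
    };

    static unsigned char generic_readb(const volatile void *addr)
    {
    	return *(const volatile unsigned char *)addr;	/* plain MMIO load */
    }

    static unsigned char hv_readb(const volatile void *addr)
    {
    	return 0xff;	/* stand-in for the HvCallPciBarLoad8 path */
    }

    static struct io_ops io_ops = { .readb = generic_readb };

    static void sketch_pcibios_init(void)
    {
    	io_ops.readb = hv_readb;	/* the "Install IO hooks" step above */
    }

    unsigned char sketch_readb(const volatile void *addr)
    {
    	return io_ops.readb(addr);	/* every caller dispatches via the table */
    }

One swab-related detail worth noting: the old in_le16/in_le32 paths byte-swapped hypervisor results explicitly; the new code expresses the same conversions as le16_to_cpu()/cpu_to_le16() and friends, which makes the intended endianness visible in the type conversions rather than buried in swab calls.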
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c
index c241413629ac..b54e37101e69 100644
--- a/arch/powerpc/platforms/iseries/proc.c
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -24,6 +24,7 @@
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/lppaca.h>
+#include <asm/firmware.h>
 #include <asm/iseries/hv_call_xm.h>
 
 #include "processor_vpd.h"
@@ -31,7 +32,12 @@
 
 static int __init iseries_proc_create(void)
 {
-	struct proc_dir_entry *e = proc_mkdir("iSeries", 0);
+	struct proc_dir_entry *e;
+
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		return 0;
+
+	e = proc_mkdir("iSeries", 0);
 	if (!e)
 		return 1;
 
@@ -106,6 +112,9 @@ static int __init iseries_proc_init(void)
 {
 	struct proc_dir_entry *e;
 
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		return 0;
+
 	e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL);
 	if (e)
 		e->proc_fops = &proc_titantod_operations;
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 6f73469fd3b0..cce7e309340c 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -21,7 +21,6 @@
 #include <linux/smp.h>
 #include <linux/param.h>
 #include <linux/string.h>
-#include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/kdev_t.h>
 #include <linux/major.h>
@@ -80,8 +79,6 @@ extern void iSeries_pci_final_fixup(void);
 static void iSeries_pci_final_fixup(void) { }
 #endif
 
-extern int rd_size;	/* Defined in drivers/block/rd.c */
-
 extern unsigned long iSeries_recal_tb;
 extern unsigned long iSeries_recal_titan;
 
@@ -295,24 +292,6 @@ static void __init iSeries_init_early(void)
 {
 	DBG(" -> iSeries_init_early()\n");
 
-#if defined(CONFIG_BLK_DEV_INITRD)
-	/*
-	 * If the init RAM disk has been configured and there is
-	 * a non-zero starting address for it, set it up
-	 */
-	if (naca.xRamDisk) {
-		initrd_start = (unsigned long)__va(naca.xRamDisk);
-		initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE;
-		initrd_below_start_ok = 1;	// ramdisk in kernel space
-		ROOT_DEV = Root_RAM0;
-		if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize)
-			rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024;
-	} else
-#endif /* CONFIG_BLK_DEV_INITRD */
-	{
-	    /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
-	}
-
 	iSeries_recal_tb = get_tb();
 	iSeries_recal_titan = HvCallXm_loadTod();
 
@@ -331,17 +310,6 @@ static void __init iSeries_init_early(void)
 
 	mf_init();
 
-	/* If we were passed an initrd, set the ROOT_DEV properly if the values
-	 * look sensible. If not, clear initrd reference.
-	 */
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
-	    initrd_end > initrd_start)
-		ROOT_DEV = Root_RAM0;
-	else
-		initrd_start = initrd_end = 0;
-#endif /* CONFIG_BLK_DEV_INITRD */
-
 	DBG(" <- iSeries_init_early()\n");
 }
 
@@ -559,7 +527,8 @@ static void __init iSeries_fixup_klimit(void)
 static int __init iSeries_src_init(void)
 {
 	/* clear the progress line */
-	ppc_md.progress(" ", 0xffff);
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		ppc_md.progress(" ", 0xffff);
 	return 0;
 }
 
@@ -649,6 +618,16 @@ static void iseries_dedicated_idle(void)
 void __init iSeries_init_IRQ(void) { }
 #endif
 
+static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
+				     unsigned long flags)
+{
+	return (void __iomem *)address;
+}
+
+static void iseries_iounmap(volatile void __iomem *token)
+{
+}
+
 /*
  * iSeries has no legacy IO, anything calling this function has to
  * fail or bad things will happen
@@ -665,6 +644,8 @@ static int __init iseries_probe(void)
 		return 0;
 
 	hpte_init_iSeries();
+	/* iSeries does not support 16M pages */
+	cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE;
 
 	return 1;
 }
@@ -687,6 +668,8 @@ define_machine(iseries) {
 	.progress	= iSeries_progress,
 	.probe		= iseries_probe,
 	.check_legacy_ioport	= iseries_check_legacy_ioport,
+	.ioremap	= iseries_ioremap,
+	.iounmap	= iseries_iounmap,
 	/* XXX Implement enable_pmcs for iSeries */
 };
 
@@ -697,7 +680,7 @@ void * __init iSeries_early_setup(void)
 	/* Identify CPU type. This is done again by the common code later
 	 * on but calling this function multiple times is fine.
 	 */
-	identify_cpu(0);
+	identify_cpu(0, mfspr(SPRN_PVR));
 
 	powerpc_firmware_features |= FW_FEATURE_ISERIES;
 	powerpc_firmware_features |= FW_FEATURE_LPAR;
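
Two of these hunks deserve a note. The initrd block removed from iSeries_init_early() is the producer-side counterpart of dt_initrd() in dt.c above: the platform no longer pokes initrd_start/initrd_end and ROOT_DEV directly, it advertises the RAM disk through /chosen. And the new ioremap/iounmap machine hooks can be trivial identity operations because the hypervisor maps the partition's address space up front, so there is nothing to remap or unmap. A sketch of how a machine-specific hook like this can short-circuit the generic path; the dispatch shown is an assumed shape for illustration, not the generic code verbatim:

    /* Sketch: generic ioremap deferring to an optional machine hook. */
    void __iomem *sketch_ioremap(phys_addr_t addr, unsigned long size,
    			     unsigned long flags)
    {
    	if (ppc_md.ioremap)			/* platform override, if any */
    		return ppc_md.ioremap(addr, size, flags);
    	return NULL;	/* stand-in for the normal page-table based path */
    }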
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 04e07e5da0c1..a6799ed34a66 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -42,6 +42,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/prom.h>
+#include <asm/firmware.h>
 #include <asm/iseries/hv_types.h>
 #include <asm/iseries/hv_lp_event.h>
 #include <asm/iseries/hv_lp_config.h>
@@ -119,10 +120,9 @@ static int proc_viopath_show(struct seq_file *m, void *v)
 	struct device_node *node;
 	const char *sysid;
 
-	buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL);
+	buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
 	if (!buf)
 		return 0;
-	memset(buf, 0, HW_PAGE_SIZE);
 
 	handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
 				DMA_FROM_DEVICE);
@@ -184,6 +184,9 @@ static int __init vio_proc_init(void)
 {
 	struct proc_dir_entry *e;
 
+	if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		return 0;
+
 	e = create_proc_entry("iSeries/config", 0, NULL);
 	if (e)
 		e->proc_fops = &proc_viopath_operations;
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
index 0657c579b840..c6911ddc479f 100644
--- a/arch/powerpc/platforms/maple/maple.h
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -8,5 +8,5 @@ extern void maple_get_rtc_time(struct rtc_time *tm);
 extern unsigned long maple_get_boot_time(void);
 extern void maple_calibrate_decr(void);
 extern void maple_pci_init(void);
-extern void maple_pcibios_fixup(void);
+extern void maple_pci_irq_fixup(struct pci_dev *dev);
 extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 63b4d1bff359..3f6a69f67195 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -502,38 +502,29 @@ static int __init add_bridge(struct device_node *dev)
 }
 
 
-void __init maple_pcibios_fixup(void)
+void __devinit maple_pci_irq_fixup(struct pci_dev *dev)
 {
-	struct pci_dev *dev = NULL;
-
-	DBG(" -> maple_pcibios_fixup\n");
+	DBG(" -> maple_pci_irq_fixup\n");
 
-	for_each_pci_dev(dev) {
-		/* Fixup IRQ for PCIe host */
-		if (u4_pcie != NULL && dev->bus->number == 0 &&
-		    pci_bus_to_host(dev->bus) == u4_pcie) {
-			printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
-			dev->irq = irq_create_mapping(NULL, 1);
-			if (dev->irq != NO_IRQ)
-				set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
-			continue;
-		}
-
-		/* Hide AMD8111 IDE interrupt when in legacy mode so
-		 * the driver calls pci_get_legacy_ide_irq()
-		 */
-		if (dev->vendor == PCI_VENDOR_ID_AMD &&
-		    dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
-		    (dev->class & 5) != 5) {
-			dev->irq = NO_IRQ;
-			continue;
-		}
+	/* Fixup IRQ for PCIe host */
+	if (u4_pcie != NULL && dev->bus->number == 0 &&
+	    pci_bus_to_host(dev->bus) == u4_pcie) {
+		printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
+		dev->irq = irq_create_mapping(NULL, 1);
+		if (dev->irq != NO_IRQ)
+			set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+	}
 
-		/* For all others, map the interrupt from the device-tree */
-		pci_read_irq_line(dev);
+	/* Hide AMD8111 IDE interrupt when in legacy mode so
+	 * the driver calls pci_get_legacy_ide_irq()
+	 */
+	if (dev->vendor == PCI_VENDOR_ID_AMD &&
+	    dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
+	    (dev->class & 5) != 5) {
+		dev->irq = NO_IRQ;
 	}
 
-	DBG(" <- maple_pcibios_fixup\n");
+	DBG(" <- maple_pci_irq_fixup\n");
 }
 
 static void __init maple_fixup_phb_resources(void)
@@ -571,7 +562,7 @@ void __init maple_pci_init(void)
 	for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
 		if (np->name == NULL)
 			continue;
-		if (strcmp(np->name, "pci") == 0) {
+		if (!strcmp(np->name, "pci") || !strcmp(np->name, "pcie")) {
 			if (add_bridge(np) == 0)
 				of_node_get(np);
 		}
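
This conversion, repeated for pasemi and powermac below, replaces a whole-tree pcibios_fixup pass with a per-device pci_irq_fixup hook: the common PCI code now maps each device's interrupt from the device tree itself and calls the platform back only so it can special-case the exceptions. A sketch of the calling convention; the core-side function name is assumed here for illustration, not a quote of the real call site:

    /* Per-device IRQ fixup: the platform hook only patches exceptions. */
    static void example_pci_irq_fixup(struct pci_dev *dev)
    {
    	if (dev->vendor == 0x1022)	/* AMD, as in the hunk above */
    		dev->irq = NO_IRQ;	/* hide it; driver asks for legacy IRQ */
    }

    static void pcibios_setup_new_device(struct pci_dev *dev)	/* assumed name */
    {
    	pci_read_irq_line(dev);		/* common: map IRQ from device tree */
    	if (ppc_md.pci_irq_fixup)
    		ppc_md.pci_irq_fixup(dev);	/* platform exception path */
    }

Besides dropping the for_each_pci_dev() boilerplate from every platform, the per-device shape means hotplugged devices get the same fixup as those found at boot.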
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index fe6b9bff61b9..50855d4fd5a0 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -60,6 +60,7 @@
 #include <asm/of_device.h>
 #include <asm/lmb.h>
 #include <asm/mpic.h>
+#include <asm/rtas.h>
 #include <asm/udbg.h>
 
 #include "maple.h"
@@ -166,6 +167,16 @@ struct smp_ops_t maple_smp_ops = {
 };
 #endif /* CONFIG_SMP */
 
+static void __init maple_use_rtas_reboot_and_halt_if_present(void)
+{
+	if (rtas_service_present("system-reboot") &&
+	    rtas_service_present("power-off")) {
+		ppc_md.restart = rtas_restart;
+		ppc_md.power_off = rtas_power_off;
+		ppc_md.halt = rtas_halt;
+	}
+}
+
 void __init maple_setup_arch(void)
 {
 	/* init to some ~sane value until calibrate_delay() runs */
@@ -181,6 +192,7 @@ void __init maple_setup_arch(void)
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
+	maple_use_rtas_reboot_and_halt_if_present();
 
 	printk(KERN_DEBUG "Using native/NAP idle loop\n");
 }
@@ -242,7 +254,6 @@ static void __init maple_init_IRQ(void)
 		printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n",
 		       openpic_addr, has_isus);
 	}
-	of_node_put(root);
 
 	BUG_ON(openpic_addr == 0);
 
@@ -312,7 +323,7 @@ define_machine(maple_md) {
 	.setup_arch		= maple_setup_arch,
 	.init_early		= maple_init_early,
 	.init_IRQ		= maple_init_IRQ,
-	.pcibios_fixup		= maple_pcibios_fixup,
+	.pci_irq_fixup		= maple_pci_irq_fixup,
 	.pci_get_legacy_ide_irq	= maple_pci_get_legacy_ide_irq,
 	.restart		= maple_restart,
 	.power_off		= maple_power_off,
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index fd71d72736b2..51c2a2397ecf 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -3,6 +3,5 @@
 
 extern unsigned long pas_get_boot_time(void);
 extern void pas_pci_init(void);
-extern void pas_pcibios_fixup(void);
 
 #endif /* _PASEMI_PASEMI_H */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index 39020c1fa13d..faa618e04047 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -148,14 +148,6 @@ static int __init add_bridge(struct device_node *dev)
 }
 
 
-void __init pas_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
-
 static void __init pas_fixup_phb_resources(void)
 {
 	struct pci_controller *hose, *tmp;
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 106896c3b60a..bea7d1bb1a3b 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/console.h>
+#include <linux/pci.h>
 
 #include <asm/prom.h>
 #include <asm/system.h>
@@ -71,6 +72,9 @@ void __init pas_setup_arch(void)
 	/* Setup SMP callback */
 	smp_ops = &pas_smp_ops;
 #endif
+	/* no iommu yet */
+	pci_dma_ops = &dma_direct_ops;
+
 	/* Lookup PCI hosts */
 	pas_pci_init();
 
@@ -81,17 +85,6 @@ void __init pas_setup_arch(void)
 	printk(KERN_DEBUG "Using default idle loop\n");
 }
 
-static void iommu_dev_setup_null(struct pci_dev *dev) { }
-static void iommu_bus_setup_null(struct pci_bus *bus) { }
-
-static void __init pas_init_early(void)
-{
-	/* No iommu code yet */
-	ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-	ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-	pci_direct_iommu_init();
-}
-
 /* No legacy IO on our parts */
 static int pas_check_legacy_ioport(unsigned int baseport)
 {
@@ -136,7 +129,6 @@ static __init void pas_init_IRQ(void)
 	}
 	openpic_addr = of_read_number(opprop, naddr);
 	printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
-	of_node_put(root);
 
 	mpic = mpic_alloc(mpic_node, openpic_addr, MPIC_PRIMARY, 0, 0,
 			" PAS-OPIC  ");
@@ -173,10 +165,8 @@ define_machine(pas) {
 	.name			= "PA Semi PA6T-1682M",
 	.probe			= pas_probe,
 	.setup_arch		= pas_setup_arch,
-	.init_early		= pas_init_early,
 	.init_IRQ		= pas_init_IRQ,
 	.get_irq		= mpic_get_irq,
-	.pcibios_fixup		= pas_pcibios_fixup,
 	.restart		= pas_restart,
 	.power_off		= pas_power_off,
 	.halt			= pas_halt,
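
The one-line pci_dma_ops assignment replaces the whole pas_init_early() scaffold above it: instead of registering null iommu_dev_setup/iommu_bus_setup hooks and calling pci_direct_iommu_init(), a platform without an IOMMU now just installs the direct DMA operations table, matching the dma_iommu_ops assignment seen in iseries/iommu.c earlier. A reduced sketch of the ops-table idea; the struct here is a simplified stand-in, not the kernel's dma_mapping_ops definition:

    /* Sketch: DMA done through a swappable operations table. */
    struct demo_dma_ops {
    	unsigned long (*map_single)(void *cpu_addr, unsigned long size);
    };

    /* Direct mapping: bus address == physical address, no IOMMU involved. */
    static unsigned long direct_map_single(void *cpu_addr, unsigned long size)
    {
    	return (unsigned long)cpu_addr;	/* stand-in for virt-to-bus */
    }

    static struct demo_dma_ops demo_direct_ops = {
    	.map_single = direct_map_single,
    };

    static struct demo_dma_ops *demo_pci_dma_ops;

    static void demo_setup_arch(void)
    {
    	demo_pci_dma_ops = &demo_direct_ops;	/* what pas_setup_arch() does */
    }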
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a8544a..c3a89414ddc0 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@
 
 #define OLD_BACKLIGHT_MAX 15
 
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
 
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
 
 /* Although these variables are used in interrupt context, it makes no sense to
  * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
 	return level;
 }
 
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
 	return error;
 }
 
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
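
These hunks are the mechanical fallout of the 2.6.20 workqueue API change: work functions now receive the work_struct itself instead of an opaque data pointer, and DECLARE_WORK lost its third argument. Handlers that need per-instance data recover it with container_of(); the backlight workers above use globals, so they simply ignore the argument. A sketch of the container_of() idiom for handlers that do carry data (the struct and function names are hypothetical):

    #include <linux/workqueue.h>

    struct demo_device {			/* hypothetical driver state */
    	int brightness;
    	struct work_struct update_work;	/* work embedded in the state */
    };

    static void demo_update_worker(struct work_struct *work)
    {
    	/* New-style handler: climb from the work_struct back to the
    	 * containing object instead of receiving a void *data. */
    	struct demo_device *dev =
    		container_of(work, struct demo_device, update_work);

    	dev->brightness++;
    }

    static void demo_init(struct demo_device *dev)
    {
    	INIT_WORK(&dev->update_work, demo_update_worker);
    	schedule_work(&dev->update_work);
    }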
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index e49621be6640..c29a6a064d22 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -486,10 +486,6 @@ static long heathrow_sound_enable(struct device_node *node, long param,
 
 static u32 save_fcr[6];
 static u32 save_mbcr;
-static u32 save_gpio_levels[2];
-static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
-static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
-static u32 save_unin_clock_ctl;
 static struct dbdma_regs save_dbdma[13];
 static struct dbdma_regs save_alt_dbdma[13];
 
@@ -1548,6 +1544,10 @@ void g5_phy_disable_cpu1(void)
 
 
 #ifdef CONFIG_PM
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
 
 static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
 {
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 9923adc5248e..f42475b27c15 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -48,7 +48,6 @@ static struct pci_controller *u3_ht;
 static int has_second_ohare;
 #endif /* CONFIG_PPC64 */
 
-extern u8 pci_cache_line_size;
 extern int pcibios_assign_bus_offset;
 
 struct device_node *k2_skiplist[2];
@@ -985,30 +984,23 @@ static int __init add_bridge(struct device_node *dev)
 	return 0;
 }
 
-void __init pmac_pcibios_fixup(void)
+void __devinit pmac_pci_irq_fixup(struct pci_dev *dev)
 {
-	struct pci_dev* dev = NULL;
-
-	for_each_pci_dev(dev) {
-		/* Read interrupt from the device-tree */
-		pci_read_irq_line(dev);
-
 #ifdef CONFIG_PPC32
-		/* Fixup interrupt for the modem/ethernet combo controller.
-		 * on machines with a second ohare chip.
-		 * The number in the device tree (27) is bogus (correct for
-		 * the ethernet-only board but not the combo ethernet/modem
-		 * board). The real interrupt is 28 on the second controller
-		 * -> 28+32 = 60.
-		 */
-		if (has_second_ohare &&
-		    dev->vendor == PCI_VENDOR_ID_DEC &&
-		    dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
-			dev->irq = irq_create_mapping(NULL, 60);
-			set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
-		}
-#endif /* CONFIG_PPC32 */
+	/* Fixup interrupt for the modem/ethernet combo controller.
+	 * on machines with a second ohare chip.
+	 * The number in the device tree (27) is bogus (correct for
+	 * the ethernet-only board but not the combo ethernet/modem
+	 * board). The real interrupt is 28 on the second controller
+	 * -> 28+32 = 60.
+	 */
+	if (has_second_ohare &&
+	    dev->vendor == PCI_VENDOR_ID_DEC &&
+	    dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
+		dev->irq = irq_create_mapping(NULL, 60);
+		set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
 	}
+#endif /* CONFIG_PPC32 */
 }
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 94e7b24b840b..6e090a7dea83 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -20,7 +20,7 @@ extern void pmac_get_rtc_time(struct rtc_time *);
 extern int pmac_set_rtc_time(struct rtc_time *);
 extern void pmac_read_rtc_time(void);
 extern void pmac_calibrate_decr(void);
-extern void pmac_pcibios_fixup(void);
+extern void pmac_pci_irq_fixup(struct pci_dev *);
 extern void pmac_pci_init(void);
 extern unsigned long pmac_ide_get_base(int index);
 extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 824a618396ab..d949e9df41ef 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -70,6 +70,7 @@
 #include <asm/pmac_feature.h>
 #include <asm/time.h>
 #include <asm/of_device.h>
+#include <asm/of_platform.h>
 #include <asm/mmu_context.h>
 #include <asm/iommu.h>
 #include <asm/smu.h>
@@ -361,7 +362,7 @@ char *bootdevice;
 void *boot_host;
 int boot_target;
 int boot_part;
-extern dev_t boot_dev;
+static dev_t boot_dev;
 
 #ifdef CONFIG_SCSI
 void __init note_scsi_host(struct device_node *node, void *host)
@@ -676,8 +677,6 @@ static int __init pmac_probe(void)
 
 #ifdef CONFIG_PPC32
 	/* isa_io_base gets set in pmac_pci_init */
-	isa_mem_base = PMAC_ISA_MEM_BASE;
-	pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
 	ISA_DMA_THRESHOLD = ~0L;
 	DMA_MODE_READ = 1;
 	DMA_MODE_WRITE = 2;
@@ -727,7 +726,7 @@ define_machine(powermac) {
 	.show_cpuinfo		= pmac_show_cpuinfo,
 	.init_IRQ		= pmac_pic_init,
 	.get_irq		= NULL,	/* changed later */
-	.pcibios_fixup		= pmac_pcibios_fixup,
+	.pci_irq_fixup		= pmac_pci_irq_fixup,
 	.restart		= pmac_restart,
 	.power_off		= pmac_power_off,
 	.halt			= pmac_halt,
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
new file mode 100644
index 000000000000..de52ec4e9e58
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -0,0 +1,54 @@
+menu "PS3 Platform Options"
+	depends on PPC_PS3
+
+config PS3_HTAB_SIZE
+	depends on PPC_PS3
+	int "PS3 Platform pagetable size"
+	range 18 20
+	default 20
+	help
+	  This option is only for experts who may have the desire to fine
+	  tune the pagetable size on their system.  The value here is
+	  expressed as the log2 of the page table size.  Valid values are
+	  18, 19, and 20, corresponding to 256KB, 512KB and 1MB respectively.
+
+	  If unsure, choose the default (20) with the confidence that your
+	  system will have optimal runtime performance.
+
+config PS3_DYNAMIC_DMA
+	depends on PPC_PS3 && EXPERIMENTAL
+	bool "PS3 Platform dynamic DMA page table management"
+	default n
+	help
+	  This option will enable kernel support to take advantage of the
+	  per device dynamic DMA page table management provided by the Cell
+	  processor's IO Controller.  This support incurs some runtime
+	  overhead and also slightly increases kernel memory usage.  The
+	  current implementation should be considered experimental.
+
+	  This support is mainly for Linux kernel development.  If unsure,
+	  say N.
+
+config PS3_USE_LPAR_ADDR
+	depends on PPC_PS3 && EXPERIMENTAL
+	bool "PS3 use lpar address space"
+	default y
+	help
+	  This option is solely for experimentation by experts.  Disables
+	  translation of lpar addresses.  SPE support currently won't work
+	  without this set to y.
+
+	  If you have any doubt, choose the default y.
+
+config PS3_VUART
+	depends on PPC_PS3
+	bool "PS3 Virtual UART support"
+	default y
+	help
+	  Include support for the PS3 Virtual UART.
+
+	  This support is required for several system services
+	  including the System Manager and AV Settings.  In
+	  general, all users will say Y.
+
+endmenu
diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile
new file mode 100644
index 000000000000..3757cfabc8ce
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/Makefile
@@ -0,0 +1,4 @@
+obj-y += setup.o mm.o smp.o time.o hvcall.o htab.o repository.o
+obj-y += interrupt.o exports.o os-area.o
+
+obj-$(CONFIG_SPU_BASE) += spu.o
diff --git a/arch/powerpc/platforms/ps3/exports.c b/arch/powerpc/platforms/ps3/exports.c
new file mode 100644
index 000000000000..a7e8ffd24a65
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/exports.c
@@ -0,0 +1,27 @@
1/*
2 * PS3 hvcall exports for modules.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/module.h>
22
23#define LV1_CALL(name, in, out, num) \
24 extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL); \
25 EXPORT_SYMBOL(_lv1_##name);
26
27#include <asm/lv1call.h>
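A note on the X-macro pattern used here: <asm/lv1call.h> is a list of LV1_CALL(name, in, out, num) entries, and each consumer defines LV1_CALL to stamp out what it needs before including the header. Assuming the header carried an entry such as LV1_CALL(get_version_info, 1, 1, 127) (name and number are illustrative, not taken from this patch), the include above would expand to roughly:

	/* Illustrative expansion only; the real entries live in <asm/lv1call.h>. */
	extern s64 _lv1_get_version_info(LV1_1_IN_1_OUT_ARG_DECL);
	EXPORT_SYMBOL(_lv1_get_version_info);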
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
new file mode 100644
index 000000000000..8fe1769655a3
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -0,0 +1,277 @@
1/*
2 * PS3 pagetable management routines.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22
23#include <asm/machdep.h>
24#include <asm/lmb.h>
25#include <asm/udbg.h>
26#include <asm/ps3.h>
27#include <asm/lv1call.h>
28
29#include "platform.h"
30
31#if defined(DEBUG)
32#define DBG(fmt...) udbg_printf(fmt)
33#else
34#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
35#endif
36
37static hpte_t *htab;
38static unsigned long htab_addr;
39static unsigned char *bolttab;
40static unsigned char *inusetab;
41
42static spinlock_t ps3_bolttab_lock = SPIN_LOCK_UNLOCKED;
43
44#define debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g) \
45 _debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
46static void _debug_dump_hpte(unsigned long pa, unsigned long va,
47 unsigned long group, unsigned long bitmap, hpte_t lhpte, int psize,
48 unsigned long slot, const char* func, int line)
49{
50 DBG("%s:%d: pa = %lxh\n", func, line, pa);
51 DBG("%s:%d: lpar = %lxh\n", func, line,
52 ps3_mm_phys_to_lpar(pa));
53 DBG("%s:%d: va = %lxh\n", func, line, va);
54 DBG("%s:%d: group = %lxh\n", func, line, group);
55 DBG("%s:%d: bitmap = %lxh\n", func, line, bitmap);
56 DBG("%s:%d: hpte.v = %lxh\n", func, line, lhpte.v);
57 DBG("%s:%d: hpte.r = %lxh\n", func, line, lhpte.r);
58 DBG("%s:%d: psize = %xh\n", func, line, psize);
59 DBG("%s:%d: slot = %lxh\n", func, line, slot);
60}
61
62static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
63 unsigned long pa, unsigned long rflags, unsigned long vflags, int psize)
64{
65 unsigned long slot;
66 hpte_t lhpte;
67 int secondary = 0;
68 unsigned long result;
69 unsigned long bitmap;
70 unsigned long flags;
71 unsigned long p_pteg, s_pteg, b_index, b_mask, cb, ci;
72
73 vflags &= ~HPTE_V_SECONDARY; /* this bit is ignored */
74
75 lhpte.v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
76 lhpte.r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
77
78 p_pteg = hpte_group / HPTES_PER_GROUP;
79 s_pteg = ~p_pteg & htab_hash_mask;
80
81 spin_lock_irqsave(&ps3_bolttab_lock, flags);
82
83 BUG_ON(bolttab[p_pteg] == 0xff && bolttab[s_pteg] == 0xff);
84
85 bitmap = (inusetab[p_pteg] << 8) | inusetab[s_pteg];
86
87 if (bitmap == 0xffff) {
88 /*
89 * PTEG is full. Search for victim.
90 */
91 bitmap &= ~((bolttab[p_pteg] << 8) | bolttab[s_pteg]);
92 do {
93 ci = mftb() & 15;
94 cb = 0x8000UL >> ci;
95 } while ((cb & bitmap) == 0);
96 } else {
97 /*
98 * search free slot in hardware order
99 * [primary] 0, 2, 4, 6, 1, 3, 5, 7
100 * [secondary] 0, 2, 4, 6, 1, 3, 5, 7
101 */
102 for (ci = 0; ci < HPTES_PER_GROUP; ci += 2) {
103 cb = 0x8000UL >> ci;
104 if ((cb & bitmap) == 0)
105 goto found;
106 }
107 for (ci = 1; ci < HPTES_PER_GROUP; ci += 2) {
108 cb = 0x8000UL >> ci;
109 if ((cb & bitmap) == 0)
110 goto found;
111 }
112 for (ci = HPTES_PER_GROUP; ci < HPTES_PER_GROUP*2; ci += 2) {
113 cb = 0x8000UL >> ci;
114 if ((cb & bitmap) == 0)
115 goto found;
116 }
117 for (ci = HPTES_PER_GROUP+1; ci < HPTES_PER_GROUP*2; ci += 2) {
118 cb = 0x8000UL >> ci;
119 if ((cb & bitmap) == 0)
120 goto found;
121 }
122 }
123
124found:
125 if (ci < HPTES_PER_GROUP) {
126 slot = p_pteg * HPTES_PER_GROUP + ci;
127 } else {
128 slot = s_pteg * HPTES_PER_GROUP + (ci & 7);
129 /* lhpte.dw0.dw0.h = 1; */
130 vflags |= HPTE_V_SECONDARY;
131 lhpte.v |= HPTE_V_SECONDARY;
132 }
133
134 result = lv1_write_htab_entry(0, slot, lhpte.v, lhpte.r);
135
136 if (result) {
137 debug_dump_hpte(pa, va, hpte_group, bitmap, lhpte, psize, slot);
138 BUG();
139 }
140
141 /*
142 * If used slot is not in primary HPTE group,
143 * the slot should be in secondary HPTE group.
144 */
145
146 if ((hpte_group ^ slot) & ~(HPTES_PER_GROUP - 1)) {
147 secondary = 1;
148 b_index = s_pteg;
149 } else {
150 secondary = 0;
151 b_index = p_pteg;
152 }
153
154 b_mask = (lhpte.v & HPTE_V_BOLTED) ? 1 << 7 : 0 << 7;
155 bolttab[b_index] |= b_mask >> (slot & 7);
156 b_mask = 1 << 7;
157 inusetab[b_index] |= b_mask >> (slot & 7);
158 spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
159
160 return (slot & 7) | (secondary << 3);
161}
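The return value of ps3_hpte_insert() packs two facts the generic hash MMU code needs: the slot index within the 8-entry PTEG in the low three bits, and a secondary-hash flag in bit 3. A sketch of the decode, mirroring the "(slot & 7) | (secondary << 3)" expression above (helper name is illustrative):

	static inline void decode_hpte_slot(long ret, unsigned int *slot,
		int *secondary)
	{
		*slot = ret & 7;		/* index within the PTEG */
		*secondary = (ret >> 3) & 1;	/* 1 if the secondary hash was used */
	}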
162
163static long ps3_hpte_remove(unsigned long hpte_group)
164{
165 panic("ps3_hpte_remove() not implemented");
166 return 0;
167}
168
169static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
170 unsigned long va, int psize, int local)
171{
172 unsigned long flags;
173 unsigned long result;
174 unsigned long pteg, bit;
175 unsigned long hpte_v, want_v;
176
177 want_v = hpte_encode_v(va, psize);
178
179 spin_lock_irqsave(&ps3_bolttab_lock, flags);
180
181 hpte_v = htab[slot].v;
182 if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
183 spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
184
185 /* ps3_hpte_insert() will be used to update PTE */
186 return -1;
187 }
188
189 result = lv1_write_htab_entry(0, slot, 0, 0);
190
191 if (result) {
192 DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
193 __func__, va, slot, psize, result, result);
194 BUG();
195 }
196
197 pteg = slot / HPTES_PER_GROUP;
198 bit = slot % HPTES_PER_GROUP;
199 inusetab[pteg] &= ~(0x80 >> bit);
200
201 spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
202
203 /* ps3_hpte_insert() will be used to update PTE */
204 return -1;
205}
206
207static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
208 int psize)
209{
210 panic("ps3_hpte_updateboltedpp() not implemented");
211}
212
213static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
214 int psize, int local)
215{
216 unsigned long flags;
217 unsigned long result;
218 unsigned long pteg, bit;
219
220 spin_lock_irqsave(&ps3_bolttab_lock, flags);
221 result = lv1_write_htab_entry(0, slot, 0, 0);
222
223 if (result) {
224 DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
225 __func__, va, slot, psize, result, result);
226 BUG();
227 }
228
229 pteg = slot / HPTES_PER_GROUP;
230 bit = slot % HPTES_PER_GROUP;
231 inusetab[pteg] &= ~(0x80 >> bit);
232 spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
233}
234
235static void ps3_hpte_clear(void)
236{
237 lv1_unmap_htab(htab_addr);
238}
239
240void __init ps3_hpte_init(unsigned long htab_size)
241{
242 long bitmap_size;
243
244 DBG(" -> %s:%d\n", __func__, __LINE__);
245
246 ppc_md.hpte_invalidate = ps3_hpte_invalidate;
247 ppc_md.hpte_updatepp = ps3_hpte_updatepp;
248 ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
249 ppc_md.hpte_insert = ps3_hpte_insert;
250 ppc_md.hpte_remove = ps3_hpte_remove;
251 ppc_md.hpte_clear_all = ps3_hpte_clear;
252
253 ppc64_pft_size = __ilog2(htab_size);
254
255 bitmap_size = htab_size / sizeof(hpte_t) / 8;
256
257 bolttab = __va(lmb_alloc(bitmap_size, 1));
258 inusetab = __va(lmb_alloc(bitmap_size, 1));
259
260 memset(bolttab, 0, bitmap_size);
261 memset(inusetab, 0, bitmap_size);
262
263 DBG(" <- %s:%d\n", __func__, __LINE__);
264}
265
266void __init ps3_map_htab(void)
267{
268 long result;
269 unsigned long htab_size = (1UL << ppc64_pft_size);
270
271 result = lv1_map_htab(0, &htab_addr);
272
273 htab = (hpte_t *)__ioremap(htab_addr, htab_size, PAGE_READONLY_X);
274
275 DBG("%s:%d: lpar %016lxh, virt %016lxh\n", __func__, __LINE__,
276 htab_addr, (unsigned long)htab);
277}
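A note on the bookkeeping sizes in ps3_hpte_init(): htab_size / sizeof(hpte_t) is the number of HPTEs, and dividing by 8 leaves exactly one byte per 8-entry PTEG, so bolttab[] and inusetab[] dedicate one bit per slot with the most significant bit tracking slot 0. A minimal sketch mirroring the "0x80 >> bit" arithmetic used in ps3_hpte_insert() and ps3_hpte_invalidate():

	/* One byte per PTEG; bit 7 (MSB) is slot 0, bit 0 is slot 7. */
	static inline void mark_slot_in_use(unsigned char *tab, unsigned long slot)
	{
		tab[slot / HPTES_PER_GROUP] |= 0x80 >> (slot % HPTES_PER_GROUP);
	}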
diff --git a/arch/powerpc/platforms/ps3/hvcall.S b/arch/powerpc/platforms/ps3/hvcall.S
new file mode 100644
index 000000000000..54be6523a0e8
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/hvcall.S
@@ -0,0 +1,804 @@
1/*
2 * PS3 hvcall interface.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 * Copyright 2003, 2004 (c) MontaVista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <asm/processor.h>
23#include <asm/ppc_asm.h>
24
25#define lv1call .long 0x44000022; extsw r3, r3
26
27#define LV1_N_IN_0_OUT(API_NAME, API_NUMBER) \
28_GLOBAL(_##API_NAME) \
29 \
30 mflr r0; \
31 std r0, 16(r1); \
32 \
33 li r11, API_NUMBER; \
34 lv1call; \
35 \
36 ld r0, 16(r1); \
37 mtlr r0; \
38 blr
39
40#define LV1_0_IN_0_OUT LV1_N_IN_0_OUT
41#define LV1_1_IN_0_OUT LV1_N_IN_0_OUT
42#define LV1_2_IN_0_OUT LV1_N_IN_0_OUT
43#define LV1_3_IN_0_OUT LV1_N_IN_0_OUT
44#define LV1_4_IN_0_OUT LV1_N_IN_0_OUT
45#define LV1_5_IN_0_OUT LV1_N_IN_0_OUT
46#define LV1_6_IN_0_OUT LV1_N_IN_0_OUT
47#define LV1_7_IN_0_OUT LV1_N_IN_0_OUT
48
49#define LV1_0_IN_1_OUT(API_NAME, API_NUMBER) \
50_GLOBAL(_##API_NAME) \
51 \
52 mflr r0; \
53 std r0, 16(r1); \
54 \
55 stdu r3, -8(r1); \
56 \
57 li r11, API_NUMBER; \
58 lv1call; \
59 \
60 addi r1, r1, 8; \
61 ld r11, -8(r1); \
62 std r4, 0(r11); \
63 \
64 ld r0, 16(r1); \
65 mtlr r0; \
66 blr
67
68#define LV1_0_IN_2_OUT(API_NAME, API_NUMBER) \
69_GLOBAL(_##API_NAME) \
70 \
71 mflr r0; \
72 std r0, 16(r1); \
73 \
74 std r3, -8(r1); \
75 stdu r4, -16(r1); \
76 \
77 li r11, API_NUMBER; \
78 lv1call; \
79 \
80 addi r1, r1, 16; \
81 ld r11, -8(r1); \
82 std r4, 0(r11); \
83 ld r11, -16(r1); \
84 std r5, 0(r11); \
85 \
86 ld r0, 16(r1); \
87 mtlr r0; \
88 blr
89
90#define LV1_0_IN_3_OUT(API_NAME, API_NUMBER) \
91_GLOBAL(_##API_NAME) \
92 \
93 mflr r0; \
94 std r0, 16(r1); \
95 \
96 std r3, -8(r1); \
97 std r4, -16(r1); \
98 stdu r5, -24(r1); \
99 \
100 li r11, API_NUMBER; \
101 lv1call; \
102 \
103 addi r1, r1, 24; \
104 ld r11, -8(r1); \
105 std r4, 0(r11); \
106 ld r11, -16(r1); \
107 std r5, 0(r11); \
108 ld r11, -24(r1); \
109 std r6, 0(r11); \
110 \
111 ld r0, 16(r1); \
112 mtlr r0; \
113 blr
114
115#define LV1_0_IN_7_OUT(API_NAME, API_NUMBER) \
116_GLOBAL(_##API_NAME) \
117 \
118 mflr r0; \
119 std r0, 16(r1); \
120 \
121 std r3, -8(r1); \
122 std r4, -16(r1); \
123 std r5, -24(r1); \
124 std r6, -32(r1); \
125 std r7, -40(r1); \
126 std r8, -48(r1); \
127 stdu r9, -56(r1); \
128 \
129 li r11, API_NUMBER; \
130 lv1call; \
131 \
132 addi r1, r1, 56; \
133 ld r11, -8(r1); \
134 std r4, 0(r11); \
135 ld r11, -16(r1); \
136 std r5, 0(r11); \
137 ld r11, -24(r1); \
138 std r6, 0(r11); \
139 ld r11, -32(r1); \
140 std r7, 0(r11); \
141 ld r11, -40(r1); \
142 std r8, 0(r11); \
143 ld r11, -48(r1); \
144 std r9, 0(r11); \
145 ld r11, -56(r1); \
146 std r10, 0(r11); \
147 \
148 ld r0, 16(r1); \
149 mtlr r0; \
150 blr
151
152#define LV1_1_IN_1_OUT(API_NAME, API_NUMBER) \
153_GLOBAL(_##API_NAME) \
154 \
155 mflr r0; \
156 std r0, 16(r1); \
157 \
158 stdu r4, -8(r1); \
159 \
160 li r11, API_NUMBER; \
161 lv1call; \
162 \
163 addi r1, r1, 8; \
164 ld r11, -8(r1); \
165 std r4, 0(r11); \
166 \
167 ld r0, 16(r1); \
168 mtlr r0; \
169 blr
170
171#define LV1_1_IN_2_OUT(API_NAME, API_NUMBER) \
172_GLOBAL(_##API_NAME) \
173 \
174 mflr r0; \
175 std r0, 16(r1); \
176 \
177 std r4, -8(r1); \
178 stdu r5, -16(r1); \
179 \
180 li r11, API_NUMBER; \
181 lv1call; \
182 \
183 addi r1, r1, 16; \
184 ld r11, -8(r1); \
185 std r4, 0(r11); \
186 ld r11, -16(r1); \
187 std r5, 0(r11); \
188 \
189 ld r0, 16(r1); \
190 mtlr r0; \
191 blr
192
193#define LV1_1_IN_3_OUT(API_NAME, API_NUMBER) \
194_GLOBAL(_##API_NAME) \
195 \
196 mflr r0; \
197 std r0, 16(r1); \
198 \
199 std r4, -8(r1); \
200 std r5, -16(r1); \
201 stdu r6, -24(r1); \
202 \
203 li r11, API_NUMBER; \
204 lv1call; \
205 \
206 addi r1, r1, 24; \
207 ld r11, -8(r1); \
208 std r4, 0(r11); \
209 ld r11, -16(r1); \
210 std r5, 0(r11); \
211 ld r11, -24(r1); \
212 std r6, 0(r11); \
213 \
214 ld r0, 16(r1); \
215 mtlr r0; \
216 blr
217
218#define LV1_1_IN_4_OUT(API_NAME, API_NUMBER) \
219_GLOBAL(_##API_NAME) \
220 \
221 mflr r0; \
222 std r0, 16(r1); \
223 \
224 std r4, -8(r1); \
225 std r5, -16(r1); \
226 std r6, -24(r1); \
227 stdu r7, -32(r1); \
228 \
229 li r11, API_NUMBER; \
230 lv1call; \
231 \
232 addi r1, r1, 32; \
233 ld r11, -8(r1); \
234 std r4, 0(r11); \
235 ld r11, -16(r1); \
236 std r5, 0(r11); \
237 ld r11, -24(r1); \
238 std r6, 0(r11); \
239 ld r11, -32(r1); \
240 std r7, 0(r11); \
241 \
242 ld r0, 16(r1); \
243 mtlr r0; \
244 blr
245
246#define LV1_1_IN_5_OUT(API_NAME, API_NUMBER) \
247_GLOBAL(_##API_NAME) \
248 \
249 mflr r0; \
250 std r0, 16(r1); \
251 \
252 std r4, -8(r1); \
253 std r5, -16(r1); \
254 std r6, -24(r1); \
255 std r7, -32(r1); \
256 stdu r8, -40(r1); \
257 \
258 li r11, API_NUMBER; \
259 lv1call; \
260 \
261 addi r1, r1, 40; \
262 ld r11, -8(r1); \
263 std r4, 0(r11); \
264 ld r11, -16(r1); \
265 std r5, 0(r11); \
266 ld r11, -24(r1); \
267 std r6, 0(r11); \
268 ld r11, -32(r1); \
269 std r7, 0(r11); \
270 ld r11, -40(r1); \
271 std r8, 0(r11); \
272 \
273 ld r0, 16(r1); \
274 mtlr r0; \
275 blr
276
277#define LV1_1_IN_6_OUT(API_NAME, API_NUMBER) \
278_GLOBAL(_##API_NAME) \
279 \
280 mflr r0; \
281 std r0, 16(r1); \
282 \
283 std r4, -8(r1); \
284 std r5, -16(r1); \
285 std r6, -24(r1); \
286 std r7, -32(r1); \
287 std r8, -40(r1); \
288 stdu r9, -48(r1); \
289 \
290 li r11, API_NUMBER; \
291 lv1call; \
292 \
293 addi r1, r1, 48; \
294 ld r11, -8(r1); \
295 std r4, 0(r11); \
296 ld r11, -16(r1); \
297 std r5, 0(r11); \
298 ld r11, -24(r1); \
299 std r6, 0(r11); \
300 ld r11, -32(r1); \
301 std r7, 0(r11); \
302 ld r11, -40(r1); \
303 std r8, 0(r11); \
304 ld r11, -48(r1); \
305 std r9, 0(r11); \
306 \
307 ld r0, 16(r1); \
308 mtlr r0; \
309 blr
310
311#define LV1_1_IN_7_OUT(API_NAME, API_NUMBER) \
312_GLOBAL(_##API_NAME) \
313 \
314 mflr r0; \
315 std r0, 16(r1); \
316 \
317 std r4, -8(r1); \
318 std r5, -16(r1); \
319 std r6, -24(r1); \
320 std r7, -32(r1); \
321 std r8, -40(r1); \
322 std r9, -48(r1); \
323 stdu r10, -56(r1); \
324 \
325 li r11, API_NUMBER; \
326 lv1call; \
327 \
328 addi r1, r1, 56; \
329 ld r11, -8(r1); \
330 std r4, 0(r11); \
331 ld r11, -16(r1); \
332 std r5, 0(r11); \
333 ld r11, -24(r1); \
334 std r6, 0(r11); \
335 ld r11, -32(r1); \
336 std r7, 0(r11); \
337 ld r11, -40(r1); \
338 std r8, 0(r11); \
339 ld r11, -48(r1); \
340 std r9, 0(r11); \
341 ld r11, -56(r1); \
342 std r10, 0(r11); \
343 \
344 ld r0, 16(r1); \
345 mtlr r0; \
346 blr
347
348#define LV1_2_IN_1_OUT(API_NAME, API_NUMBER) \
349_GLOBAL(_##API_NAME) \
350 \
351 mflr r0; \
352 std r0, 16(r1); \
353 \
354 stdu r5, -8(r1); \
355 \
356 li r11, API_NUMBER; \
357 lv1call; \
358 \
359 addi r1, r1, 8; \
360 ld r11, -8(r1); \
361 std r4, 0(r11); \
362 \
363 ld r0, 16(r1); \
364 mtlr r0; \
365 blr
366
367#define LV1_2_IN_2_OUT(API_NAME, API_NUMBER) \
368_GLOBAL(_##API_NAME) \
369 \
370 mflr r0; \
371 std r0, 16(r1); \
372 \
373 std r5, -8(r1); \
374 stdu r6, -16(r1); \
375 \
376 li r11, API_NUMBER; \
377 lv1call; \
378 \
379 addi r1, r1, 16; \
380 ld r11, -8(r1); \
381 std r4, 0(r11); \
382 ld r11, -16(r1); \
383 std r5, 0(r11); \
384 \
385 ld r0, 16(r1); \
386 mtlr r0; \
387 blr
388
389#define LV1_2_IN_3_OUT(API_NAME, API_NUMBER) \
390_GLOBAL(_##API_NAME) \
391 \
392 mflr r0; \
393 std r0, 16(r1); \
394 \
395 std r5, -8(r1); \
396 std r6, -16(r1); \
397 stdu r7, -24(r1); \
398 \
399 li r11, API_NUMBER; \
400 lv1call; \
401 \
402 addi r1, r1, 24; \
403 ld r11, -8(r1); \
404 std r4, 0(r11); \
405 ld r11, -16(r1); \
406 std r5, 0(r11); \
407 ld r11, -24(r1); \
408 std r6, 0(r11); \
409 \
410 ld r0, 16(r1); \
411 mtlr r0; \
412 blr
413
414#define LV1_2_IN_4_OUT(API_NAME, API_NUMBER) \
415_GLOBAL(_##API_NAME) \
416 \
417 mflr r0; \
418 std r0, 16(r1); \
419 \
420 std r5, -8(r1); \
421 std r6, -16(r1); \
422 std r7, -24(r1); \
423 stdu r8, -32(r1); \
424 \
425 li r11, API_NUMBER; \
426 lv1call; \
427 \
428 addi r1, r1, 32; \
429 ld r11, -8(r1); \
430 std r4, 0(r11); \
431 ld r11, -16(r1); \
432 std r5, 0(r11); \
433 ld r11, -24(r1); \
434 std r6, 0(r11); \
435 ld r11, -32(r1); \
436 std r7, 0(r11); \
437 \
438 ld r0, 16(r1); \
439 mtlr r0; \
440 blr
441
442#define LV1_2_IN_5_OUT(API_NAME, API_NUMBER) \
443_GLOBAL(_##API_NAME) \
444 \
445 mflr r0; \
446 std r0, 16(r1); \
447 \
448 std r5, -8(r1); \
449 std r6, -16(r1); \
450 std r7, -24(r1); \
451 std r8, -32(r1); \
452 stdu r9, -40(r1); \
453 \
454 li r11, API_NUMBER; \
455 lv1call; \
456 \
457 addi r1, r1, 40; \
458 ld r11, -8(r1); \
459 std r4, 0(r11); \
460 ld r11, -16(r1); \
461 std r5, 0(r11); \
462 ld r11, -24(r1); \
463 std r6, 0(r11); \
464 ld r11, -32(r1); \
465 std r7, 0(r11); \
466 ld r11, -40(r1); \
467 std r8, 0(r11); \
468 \
469 ld r0, 16(r1); \
470 mtlr r0; \
471 blr
472
473#define LV1_3_IN_1_OUT(API_NAME, API_NUMBER) \
474_GLOBAL(_##API_NAME) \
475 \
476 mflr r0; \
477 std r0, 16(r1); \
478 \
479 stdu r6, -8(r1); \
480 \
481 li r11, API_NUMBER; \
482 lv1call; \
483 \
484 addi r1, r1, 8; \
485 ld r11, -8(r1); \
486 std r4, 0(r11); \
487 \
488 ld r0, 16(r1); \
489 mtlr r0; \
490 blr
491
492#define LV1_3_IN_2_OUT(API_NAME, API_NUMBER) \
493_GLOBAL(_##API_NAME) \
494 \
495 mflr r0; \
496 std r0, 16(r1); \
497 \
498 std r6, -8(r1); \
499 stdu r7, -16(r1); \
500 \
501 li r11, API_NUMBER; \
502 lv1call; \
503 \
504 addi r1, r1, 16; \
505 ld r11, -8(r1); \
506 std r4, 0(r11); \
507 ld r11, -16(r1); \
508 std r5, 0(r11); \
509 \
510 ld r0, 16(r1); \
511 mtlr r0; \
512 blr
513
514#define LV1_3_IN_3_OUT(API_NAME, API_NUMBER) \
515_GLOBAL(_##API_NAME) \
516 \
517 mflr r0; \
518 std r0, 16(r1); \
519 \
520 std r6, -8(r1); \
521 std r7, -16(r1); \
522 stdu r8, -24(r1); \
523 \
524 li r11, API_NUMBER; \
525 lv1call; \
526 \
527 addi r1, r1, 24; \
528 ld r11, -8(r1); \
529 std r4, 0(r11); \
530 ld r11, -16(r1); \
531 std r5, 0(r11); \
532 ld r11, -24(r1); \
533 std r6, 0(r11); \
534 \
535 ld r0, 16(r1); \
536 mtlr r0; \
537 blr
538
539#define LV1_4_IN_1_OUT(API_NAME, API_NUMBER) \
540_GLOBAL(_##API_NAME) \
541 \
542 mflr r0; \
543 std r0, 16(r1); \
544 \
545 stdu r7, -8(r1); \
546 \
547 li r11, API_NUMBER; \
548 lv1call; \
549 \
550 addi r1, r1, 8; \
551 ld r11, -8(r1); \
552 std r4, 0(r11); \
553 \
554 ld r0, 16(r1); \
555 mtlr r0; \
556 blr
557
558#define LV1_4_IN_2_OUT(API_NAME, API_NUMBER) \
559_GLOBAL(_##API_NAME) \
560 \
561 mflr r0; \
562 std r0, 16(r1); \
563 \
564 std r7, -8(r1); \
565 stdu r8, -16(r1); \
566 \
567 li r11, API_NUMBER; \
568 lv1call; \
569 \
570 addi r1, r1, 16; \
571 ld r11, -8(r1); \
572 std r4, 0(r11); \
573 ld r11, -16(r1); \
574 std r5, 0(r11); \
575 \
576 ld r0, 16(r1); \
577 mtlr r0; \
578 blr
579
580#define LV1_4_IN_3_OUT(API_NAME, API_NUMBER) \
581_GLOBAL(_##API_NAME) \
582 \
583 mflr r0; \
584 std r0, 16(r1); \
585 \
586 std r7, -8(r1); \
587 std r8, -16(r1); \
588 stdu r9, -24(r1); \
589 \
590 li r11, API_NUMBER; \
591 lv1call; \
592 \
593 addi r1, r1, 24; \
594 ld r11, -8(r1); \
595 std r4, 0(r11); \
596 ld r11, -16(r1); \
597 std r5, 0(r11); \
598 ld r11, -24(r1); \
599 std r6, 0(r11); \
600 \
601 ld r0, 16(r1); \
602 mtlr r0; \
603 blr
604
605#define LV1_5_IN_1_OUT(API_NAME, API_NUMBER) \
606_GLOBAL(_##API_NAME) \
607 \
608 mflr r0; \
609 std r0, 16(r1); \
610 \
611 stdu r8, -8(r1); \
612 \
613 li r11, API_NUMBER; \
614 lv1call; \
615 \
616 addi r1, r1, 8; \
617 ld r11, -8(r1); \
618 std r4, 0(r11); \
619 \
620 ld r0, 16(r1); \
621 mtlr r0; \
622 blr
623
624#define LV1_5_IN_2_OUT(API_NAME, API_NUMBER) \
625_GLOBAL(_##API_NAME) \
626 \
627 mflr r0; \
628 std r0, 16(r1); \
629 \
630 std r8, -8(r1); \
631 stdu r9, -16(r1); \
632 \
633 li r11, API_NUMBER; \
634 lv1call; \
635 \
636 addi r1, r1, 16; \
637 ld r11, -8(r1); \
638 std r4, 0(r11); \
639 ld r11, -16(r1); \
640 std r5, 0(r11); \
641 \
642 ld r0, 16(r1); \
643 mtlr r0; \
644 blr
645
646#define LV1_5_IN_3_OUT(API_NAME, API_NUMBER) \
647_GLOBAL(_##API_NAME) \
648 \
649 mflr r0; \
650 std r0, 16(r1); \
651 \
652 std r8, -8(r1); \
653 std r9, -16(r1); \
654 stdu r10, -24(r1); \
655 \
656 li r11, API_NUMBER; \
657 lv1call; \
658 \
659 addi r1, r1, 24; \
660 ld r11, -8(r1); \
661 std r4, 0(r11); \
662 ld r11, -16(r1); \
663 std r5, 0(r11); \
664 ld r11, -24(r1); \
665 std r6, 0(r11); \
666 \
667 ld r0, 16(r1); \
668 mtlr r0; \
669 blr
670
671#define LV1_6_IN_1_OUT(API_NAME, API_NUMBER) \
672_GLOBAL(_##API_NAME) \
673 \
674 mflr r0; \
675 std r0, 16(r1); \
676 \
677 stdu r9, -8(r1); \
678 \
679 li r11, API_NUMBER; \
680 lv1call; \
681 \
682 addi r1, r1, 8; \
683 ld r11, -8(r1); \
684 std r4, 0(r11); \
685 \
686 ld r0, 16(r1); \
687 mtlr r0; \
688 blr
689
690#define LV1_6_IN_2_OUT(API_NAME, API_NUMBER) \
691_GLOBAL(_##API_NAME) \
692 \
693 mflr r0; \
694 std r0, 16(r1); \
695 \
696 std r9, -8(r1); \
697 stdu r10, -16(r1); \
698 \
699 li r11, API_NUMBER; \
700 lv1call; \
701 \
702 addi r1, r1, 16; \
703 ld r11, -8(r1); \
704 std r4, 0(r11); \
705 ld r11, -16(r1); \
706 std r5, 0(r11); \
707 \
708 ld r0, 16(r1); \
709 mtlr r0; \
710 blr
711
712#define LV1_6_IN_3_OUT(API_NAME, API_NUMBER) \
713_GLOBAL(_##API_NAME) \
714 \
715 mflr r0; \
716 std r0, 16(r1); \
717 \
718 std r9, -8(r1); \
719 stdu r10, -16(r1); \
720 \
721 li r11, API_NUMBER; \
722 lv1call; \
723 \
724 addi r1, r1, 16; \
725 ld r11, -8(r1); \
726 std r4, 0(r11); \
727 ld r11, -16(r1); \
728 std r5, 0(r11); \
729 ld r11, 48+8*8(r1); \
730 std r6, 0(r11); \
731 \
732 ld r0, 16(r1); \
733 mtlr r0; \
734 blr
735
736#define LV1_7_IN_1_OUT(API_NAME, API_NUMBER) \
737_GLOBAL(_##API_NAME) \
738 \
739 mflr r0; \
740 std r0, 16(r1); \
741 \
742 stdu r10, -8(r1); \
743 \
744 li r11, API_NUMBER; \
745 lv1call; \
746 \
747 addi r1, r1, 8; \
748 ld r11, -8(r1); \
749 std r4, 0(r11); \
750 \
751 ld r0, 16(r1); \
752 mtlr r0; \
753 blr
754
755#define LV1_7_IN_6_OUT(API_NAME, API_NUMBER) \
756_GLOBAL(_##API_NAME) \
757 \
758 mflr r0; \
759 std r0, 16(r1); \
760 \
761 std r10, 48+8*7(r1); \
762 \
763 li r11, API_NUMBER; \
764 lv1call; \
765 \
766 ld r11, 48+8*7(r1); \
767 std r4, 0(r11); \
768 ld r11, 48+8*8(r1); \
769 std r5, 0(r11); \
770 ld r11, 48+8*9(r1); \
771 std r6, 0(r11); \
772 ld r11, 48+8*10(r1); \
773 std r7, 0(r11); \
774 ld r11, 48+8*11(r1); \
775 std r8, 0(r11); \
776 ld r11, 48+8*12(r1); \
777 std r9, 0(r11); \
778 \
779 ld r0, 16(r1); \
780 mtlr r0; \
781 blr
782
783#define LV1_8_IN_1_OUT(API_NAME, API_NUMBER) \
784_GLOBAL(_##API_NAME) \
785 \
786 mflr r0; \
787 std r0, 16(r1); \
788 \
789 li r11, API_NUMBER; \
790 lv1call; \
791 \
792 ld r11, 48+8*8(r1); \
793 std r4, 0(r11); \
794 \
795 ld r0, 16(r1); \
796 mtlr r0; \
797 blr
798
799 .text
800
801/* the lv1 underscored call definitions expand here */
802
803#define LV1_CALL(name, in, out, num) LV1_##in##_IN_##out##_OUT(lv1_##name, num)
804#include <asm/lv1call.h>
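Each LV1_*_IN_*_OUT expansion above yields an ordinary global function: inputs arrive in r3 and up, the hypercall number goes in r11, the lv1call opcode traps into the hypervisor, and outputs come back in r4 and up before being stored through the caller-supplied pointers. From C, a stub therefore looks like a plain function returning the sign-extended lv1 status; for example, consistent with its use in htab.c above, lv1_map_htab behaves as if declared (parameter names are illustrative):

	/* Effective C signature of a 1-in/1-out stub; the authoritative
	 * declarations are generated from <asm/lv1call.h>. */
	s64 lv1_map_htab(u64 unit_id, u64 *htab_lpar_addr);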
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
new file mode 100644
index 000000000000..056c1e4141ba
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -0,0 +1,575 @@
1/*
2 * PS3 interrupt routines.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/irq.h>
24
25#include <asm/machdep.h>
26#include <asm/udbg.h>
27#include <asm/ps3.h>
28#include <asm/lv1call.h>
29
30#include "platform.h"
31
32#if defined(DEBUG)
33#define DBG(fmt...) udbg_printf(fmt)
34#else
35#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
36#endif
37
38/**
39 * ps3_alloc_io_irq - Assign a virq to a system bus device.
40 * @interrupt_id: The device interrupt id read from the system repository.
41 * @virq: The assigned Linux virq.
42 *
43 * An io irq represents a non-virtualized device interrupt. interrupt_id
44 * corresponds to the interrupt number of the interrupt controller.
45 */
46
47int ps3_alloc_io_irq(unsigned int interrupt_id, unsigned int *virq)
48{
49 int result;
50 unsigned long outlet;
51
52 result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
53
54 if (result) {
55 pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
56 __func__, __LINE__, ps3_result(result));
57 return result;
58 }
59
60 *virq = irq_create_mapping(NULL, outlet);
61
62 pr_debug("%s:%d: interrupt_id %u => outlet %lu, virq %u\n",
63 __func__, __LINE__, interrupt_id, outlet, *virq);
64
65 return 0;
66}
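A hypothetical driver-side use of the io irq helpers above, to show the intended pairing (my_interrupt_id, my_handler and dev are placeholders, not part of this patch):

	unsigned int virq;

	if (ps3_alloc_io_irq(my_interrupt_id, &virq) == 0) {
		if (request_irq(virq, my_handler, 0, "my-dev", dev))
			ps3_free_io_irq(virq);
	}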
67
68int ps3_free_io_irq(unsigned int virq)
69{
70 int result;
71
72 result = lv1_destruct_io_irq_outlet(virq_to_hw(virq));
73
74	if (result)
75 pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
76 __func__, __LINE__, ps3_result(result));
77
78 irq_dispose_mapping(virq);
79
80 return result;
81}
82
83/**
84 * ps3_alloc_event_irq - Allocate a virq for use with a system event.
85 * @virq: The assigned Linux virq.
86 *
87 * The virq can be used with lv1_connect_interrupt_event_receive_port() to
88 * arrange to receive events, or with ps3_send_event_locally() to signal
89 * events.
90 */
91
92int ps3_alloc_event_irq(unsigned int *virq)
93{
94 int result;
95 unsigned long outlet;
96
97 result = lv1_construct_event_receive_port(&outlet);
98
99 if (result) {
100 pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n",
101 __func__, __LINE__, ps3_result(result));
102 *virq = NO_IRQ;
103 return result;
104 }
105
106 *virq = irq_create_mapping(NULL, outlet);
107
108 pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__, outlet,
109 *virq);
110
111 return 0;
112}
113
114int ps3_free_event_irq(unsigned int virq)
115{
116 int result;
117
118 pr_debug(" -> %s:%d\n", __func__, __LINE__);
119
120 result = lv1_destruct_event_receive_port(virq_to_hw(virq));
121
122 if (result)
123 pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
124 __func__, __LINE__, ps3_result(result));
125
126 irq_dispose_mapping(virq);
127
128 pr_debug(" <- %s:%d\n", __func__, __LINE__);
129 return result;
130}
131
132int ps3_send_event_locally(unsigned int virq)
133{
134 return lv1_send_event_locally(virq_to_hw(virq));
135}
136
137/**
138 * ps3_connect_event_irq - Assign a virq to a system bus device.
139 * @did: The HV device identifier read from the system repository.
140 * @interrupt_id: The device interrupt id read from the system repository.
141 * @virq: The assigned Linux virq.
142 *
143 * An event irq represents a virtual device interrupt. The interrupt_id
144 * corresponds to the software interrupt number.
145 */
146
147int ps3_connect_event_irq(const struct ps3_device_id *did,
148 unsigned int interrupt_id, unsigned int *virq)
149{
150 int result;
151
152 result = ps3_alloc_event_irq(virq);
153
154 if (result)
155 return result;
156
157 result = lv1_connect_interrupt_event_receive_port(did->bus_id,
158 did->dev_id, virq_to_hw(*virq), interrupt_id);
159
160 if (result) {
161 pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port"
162 " failed: %s\n", __func__, __LINE__,
163 ps3_result(result));
164 ps3_free_event_irq(*virq);
165 *virq = NO_IRQ;
166 return result;
167 }
168
169 pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
170 interrupt_id, *virq);
171
172 return 0;
173}
174
175int ps3_disconnect_event_irq(const struct ps3_device_id *did,
176 unsigned int interrupt_id, unsigned int virq)
177{
178 int result;
179
180 pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
181 interrupt_id, virq);
182
183 result = lv1_disconnect_interrupt_event_receive_port(did->bus_id,
184 did->dev_id, virq_to_hw(virq), interrupt_id);
185
186 if (result)
187 pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port"
188 " failed: %s\n", __func__, __LINE__,
189 ps3_result(result));
190
191 ps3_free_event_irq(virq);
192
193 pr_debug(" <- %s:%d\n", __func__, __LINE__);
194 return result;
195}
196
197/**
198 * ps3_alloc_vuart_irq - Configure the system virtual uart virq.
199 * @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
200 * @virq: The assigned Linux virq.
201 *
202 * The system supports only a single virtual uart, so multiple calls without
203 * freeing the interrupt will return a wrong state error.
204 */
205
206int ps3_alloc_vuart_irq(void* virt_addr_bmp, unsigned int *virq)
207{
208 int result;
209 unsigned long outlet;
210 unsigned long lpar_addr;
211
212 BUG_ON(!is_kernel_addr((unsigned long)virt_addr_bmp));
213
214 lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
215
216 result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
217
218 if (result) {
219 pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
220 __func__, __LINE__, ps3_result(result));
221 return result;
222 }
223
224 *virq = irq_create_mapping(NULL, outlet);
225
226 pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__,
227 outlet, *virq);
228
229 return 0;
230}
231
232int ps3_free_vuart_irq(unsigned int virq)
233{
234 int result;
235
236 result = lv1_deconfigure_virtual_uart_irq();
237
238 if (result) {
239		pr_debug("%s:%d: lv1_deconfigure_virtual_uart_irq failed: %s\n",
240 __func__, __LINE__, ps3_result(result));
241 return result;
242 }
243
244 irq_dispose_mapping(virq);
245
246 return result;
247}
248
249/**
250 * ps3_alloc_spe_irq - Configure an spe virq.
251 * @spe_id: The spe_id returned from lv1_construct_logical_spe().
252 * @class: The spe interrupt class {0,1,2}.
253 * @virq: The assigned Linux virq.
254 *
255 */
256
257int ps3_alloc_spe_irq(unsigned long spe_id, unsigned int class,
258 unsigned int *virq)
259{
260 int result;
261 unsigned long outlet;
262
263 BUG_ON(class > 2);
264
265 result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
266
267 if (result) {
268 pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
269 __func__, __LINE__, ps3_result(result));
270 return result;
271 }
272
273 *virq = irq_create_mapping(NULL, outlet);
274
275 pr_debug("%s:%d: spe_id %lu, class %u, outlet %lu, virq %u\n",
276 __func__, __LINE__, spe_id, class, outlet, *virq);
277
278 return 0;
279}
280
281int ps3_free_spe_irq(unsigned int virq)
282{
283 irq_dispose_mapping(virq);
284 return 0;
285}
286
287#define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
288#define PS3_PLUG_MAX 63
289
290/**
291 * struct bmp - a per cpu irq status and mask bitmap structure
292 * @status: 256 bit status bitmap indexed by plug
293 * @unused_1:
294 * @mask: 256 bit mask bitmap indexed by plug
295 * @unused_2:
296 * @lock:
297 * @ipi_debug_brk_mask:
298 *
299 * The HV maintains per SMT thread mappings of HV outlet to HV plug on
300 * behalf of the guest. These mappings are implemented as 256 bit guest
301 * supplied bitmaps indexed by plug number. The addresses of the bitmaps are
302 * registered with the HV through lv1_configure_irq_state_bitmap().
303 *
304 * The HV supports 256 plugs per thread, assigned as {0..255}, for a total
305 * of 512 plugs supported on a processor. To simplify the logic this
306 * implementation equates HV plug value to linux virq value, constrains each
307 * interrupt to have a system wide unique plug number, and limits the range
308 * of the plug values to map into the first dword of the bitmaps. This
309 * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
310 * that there is no constraint on how many in this set an individual thread
311 * can acquire.
312 */
313
314struct bmp {
315 struct {
316 unsigned long status;
317 unsigned long unused_1[3];
318 unsigned long mask;
319 unsigned long unused_2[3];
320 } __attribute__ ((packed));
321 spinlock_t lock;
322 unsigned long ipi_debug_brk_mask;
323};
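Given this layout, a plug number maps straight onto a bit position counted from the most significant bit of the first 64-bit word, which is the same arithmetic chip_mask() and chip_unmask() use below:

	/* Plug (== virq) n occupies bit n from the MSB; valid for n <= 63. */
	static inline unsigned long plug_bit(unsigned int virq)
	{
		return 0x8000000000000000UL >> virq;
	}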
324
325/**
326 * struct private - a per cpu data structure
327 * @node: HV node id
328 * @cpu: HV thread id
329 * @bmp: an HV bmp structure
330 */
331
332struct private {
333 unsigned long node;
334 unsigned int cpu;
335 struct bmp bmp;
336};
337
338#if defined(DEBUG)
339static void _dump_64_bmp(const char *header, const unsigned long *p, unsigned cpu,
340 const char* func, int line)
341{
342 pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n",
343 func, line, header, cpu,
344 *p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
345 *p & 0xffff);
346}
347
348static void __attribute__ ((unused)) _dump_256_bmp(const char *header,
349 const unsigned long *p, unsigned cpu, const char* func, int line)
350{
351 pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n",
352 func, line, header, cpu, p[0], p[1], p[2], p[3]);
353}
354
355#define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
356static void _dump_bmp(struct private* pd, const char* func, int line)
357{
358 unsigned long flags;
359
360 spin_lock_irqsave(&pd->bmp.lock, flags);
361 _dump_64_bmp("stat", &pd->bmp.status, pd->cpu, func, line);
362 _dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
363 spin_unlock_irqrestore(&pd->bmp.lock, flags);
364}
365
366#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
367static void __attribute__ ((unused)) _dump_mask(struct private* pd,
368 const char* func, int line)
369{
370 unsigned long flags;
371
372 spin_lock_irqsave(&pd->bmp.lock, flags);
373 _dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
374 spin_unlock_irqrestore(&pd->bmp.lock, flags);
375}
376#else
377static void dump_bmp(struct private* pd) {};
378#endif /* defined(DEBUG) */
379
380static void chip_mask(unsigned int virq)
381{
382 unsigned long flags;
383 struct private *pd = get_irq_chip_data(virq);
384
385 pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
386
387 BUG_ON(virq < NUM_ISA_INTERRUPTS);
388 BUG_ON(virq > PS3_PLUG_MAX);
389
390 spin_lock_irqsave(&pd->bmp.lock, flags);
391 pd->bmp.mask &= ~(0x8000000000000000UL >> virq);
392 spin_unlock_irqrestore(&pd->bmp.lock, flags);
393
394 lv1_did_update_interrupt_mask(pd->node, pd->cpu);
395}
396
397static void chip_unmask(unsigned int virq)
398{
399 unsigned long flags;
400 struct private *pd = get_irq_chip_data(virq);
401
402 pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
403
404 BUG_ON(virq < NUM_ISA_INTERRUPTS);
405 BUG_ON(virq > PS3_PLUG_MAX);
406
407 spin_lock_irqsave(&pd->bmp.lock, flags);
408 pd->bmp.mask |= (0x8000000000000000UL >> virq);
409 spin_unlock_irqrestore(&pd->bmp.lock, flags);
410
411 lv1_did_update_interrupt_mask(pd->node, pd->cpu);
412}
413
414static void chip_eoi(unsigned int virq)
415{
416 lv1_end_of_interrupt(virq);
417}
418
419static struct irq_chip irq_chip = {
420 .typename = "ps3",
421 .mask = chip_mask,
422 .unmask = chip_unmask,
423 .eoi = chip_eoi,
424};
425
426static void host_unmap(struct irq_host *h, unsigned int virq)
427{
428 int result;
429
430 pr_debug("%s:%d: virq %d\n", __func__, __LINE__, virq);
431
432 lv1_disconnect_irq_plug(virq);
433
434 result = set_irq_chip_data(virq, NULL);
435 BUG_ON(result);
436}
437
438static DEFINE_PER_CPU(struct private, private);
439
440static int host_map(struct irq_host *h, unsigned int virq,
441 irq_hw_number_t hwirq)
442{
443 int result;
444 unsigned int cpu;
445
446 pr_debug(" -> %s:%d\n", __func__, __LINE__);
447 pr_debug("%s:%d: hwirq %lu => virq %u\n", __func__, __LINE__, hwirq,
448 virq);
449
450 /* bind this virq to a cpu */
451
452 preempt_disable();
453 cpu = smp_processor_id();
454 result = lv1_connect_irq_plug(virq, hwirq);
455 preempt_enable();
456
457 if (result) {
458 pr_info("%s:%d: lv1_connect_irq_plug failed:"
459 " %s\n", __func__, __LINE__, ps3_result(result));
460 return -EPERM;
461 }
462
463 result = set_irq_chip_data(virq, &per_cpu(private, cpu));
464 BUG_ON(result);
465
466 set_irq_chip_and_handler(virq, &irq_chip, handle_fasteoi_irq);
467
468 pr_debug(" <- %s:%d\n", __func__, __LINE__);
469 return result;
470}
471
472static struct irq_host_ops host_ops = {
473 .map = host_map,
474 .unmap = host_unmap,
475};
476
477void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
478{
479 struct private *pd = &per_cpu(private, cpu);
480
481 pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq;
482
483 pr_debug("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
484 cpu, virq, pd->bmp.ipi_debug_brk_mask);
485}
486
487static int bmp_get_and_clear_status_bit(struct bmp *m)
488{
489 unsigned long flags;
490 unsigned int bit;
491 unsigned long x;
492
493 spin_lock_irqsave(&m->lock, flags);
494
495 /* check for ipi break first to stop this cpu ASAP */
496
497 if (m->status & m->ipi_debug_brk_mask) {
498 m->status &= ~m->ipi_debug_brk_mask;
499 spin_unlock_irqrestore(&m->lock, flags);
500 return __ilog2(m->ipi_debug_brk_mask);
501 }
502
503 x = (m->status & m->mask);
504
505 for (bit = NUM_ISA_INTERRUPTS, x <<= bit; x; bit++, x <<= 1)
506 if (x & 0x8000000000000000UL) {
507 m->status &= ~(0x8000000000000000UL >> bit);
508 spin_unlock_irqrestore(&m->lock, flags);
509 return bit;
510 }
511
512 spin_unlock_irqrestore(&m->lock, flags);
513
514 pr_debug("%s:%d: not found\n", __func__, __LINE__);
515 return -1;
516}
517
518unsigned int ps3_get_irq(void)
519{
520 int plug;
521
522 struct private *pd = &__get_cpu_var(private);
523
524 plug = bmp_get_and_clear_status_bit(&pd->bmp);
525
526 if (plug < 1) {
527 pr_debug("%s:%d: no plug found: cpu %u\n", __func__, __LINE__,
528 pd->cpu);
529 dump_bmp(&per_cpu(private, 0));
530 dump_bmp(&per_cpu(private, 1));
531 return NO_IRQ;
532 }
533
534#if defined(DEBUG)
535 if (plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX) {
536 dump_bmp(&per_cpu(private, 0));
537 dump_bmp(&per_cpu(private, 1));
538 BUG();
539 }
540#endif
541 return plug;
542}
543
544void __init ps3_init_IRQ(void)
545{
546 int result;
547 unsigned long node;
548 unsigned cpu;
549 struct irq_host *host;
550
551 lv1_get_logical_ppe_id(&node);
552
553 host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &host_ops,
554 PS3_INVALID_OUTLET);
555 irq_set_default_host(host);
556 irq_set_virq_count(PS3_PLUG_MAX + 1);
557
558 for_each_possible_cpu(cpu) {
559 struct private *pd = &per_cpu(private, cpu);
560
561 pd->node = node;
562 pd->cpu = cpu;
563 spin_lock_init(&pd->bmp.lock);
564
565 result = lv1_configure_irq_state_bitmap(node, cpu,
566 ps3_mm_phys_to_lpar(__pa(&pd->bmp.status)));
567
568 if (result)
569 pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:"
570 " %s\n", __func__, __LINE__,
571 ps3_result(result));
572 }
573
574 ppc_md.get_irq = ps3_get_irq;
575}
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
new file mode 100644
index 000000000000..49c0d010d491
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -0,0 +1,831 @@
1/*
2 * PS3 address space management.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/memory_hotplug.h>
24
25#include <asm/firmware.h>
26#include <asm/lmb.h>
27#include <asm/udbg.h>
28#include <asm/ps3.h>
29#include <asm/lv1call.h>
30
31#include "platform.h"
32
33#if defined(DEBUG)
34#define DBG(fmt...) udbg_printf(fmt)
35#else
36#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
37#endif
38
39enum {
40#if defined(CONFIG_PS3_USE_LPAR_ADDR)
41 USE_LPAR_ADDR = 1,
42#else
43 USE_LPAR_ADDR = 0,
44#endif
45#if defined(CONFIG_PS3_DYNAMIC_DMA)
46 USE_DYNAMIC_DMA = 1,
47#else
48 USE_DYNAMIC_DMA = 0,
49#endif
50};
51
52enum {
53 PAGE_SHIFT_4K = 12U,
54 PAGE_SHIFT_64K = 16U,
55 PAGE_SHIFT_16M = 24U,
56};
57
58static unsigned long make_page_sizes(unsigned long a, unsigned long b)
59{
60 return (a << 56) | (b << 48);
61}
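make_page_sizes() packs up to two page-shift values into the top two bytes of the 64-bit argument that lv1_construct_virtual_address_space() expects (see ps3_mm_vas_create() below). For example:

	/* make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K)
	 *   == (24UL << 56) | (16UL << 48)
	 *   == 0x1810000000000000UL */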
62
63enum {
64 ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
65 ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
66};
67
68/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */
69
70enum {
71 HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
72 HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
73};
74
75/*============================================================================*/
76/* virtual address space routines */
77/*============================================================================*/
78
79/**
80 * struct mem_region - memory region structure
81 * @base: base address
82 * @size: size in bytes
83 * @offset: difference between base and rm.size
84 */
85
86struct mem_region {
87 unsigned long base;
88 unsigned long size;
89 unsigned long offset;
90};
91
92/**
93 * struct map - address space state variables holder
94 * @total: total memory available as reported by HV
95 * @vas_id: HV virtual address space id
96 * @htab_size: htab size in bytes
97 *
98 * The HV virtual address space (vas) allows for hotplug memory regions.
99 * Memory regions can be created and destroyed in the vas at runtime.
100 * @rm: real mode (bootmem) region
101 * @r1: hotplug memory region(s)
102 *
103 * ps3 addresses
104 * virt_addr: a cpu 'translated' effective address
105 * phys_addr: an address in what Linux thinks is the physical address space
106 * lpar_addr: an address in the HV virtual address space
107 * bus_addr: an io controller 'translated' address on a device bus
108 */
109
110struct map {
111 unsigned long total;
112 unsigned long vas_id;
113 unsigned long htab_size;
114 struct mem_region rm;
115 struct mem_region r1;
116};
117
118#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
119static void _debug_dump_map(const struct map* m, const char* func, int line)
120{
121 DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
122 DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
123 DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id);
124 DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
125 DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base);
126 DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
127 DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);
128}
129
130static struct map map;
131
132/**
133 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
134 * @phys_addr: linux physical address
135 */
136
137unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
138{
139 BUG_ON(is_kernel_addr(phys_addr));
140 if (USE_LPAR_ADDR)
141 return phys_addr;
142 else
143 return (phys_addr < map.rm.size || phys_addr >= map.total)
144 ? phys_addr : phys_addr + map.r1.offset;
145}
146
147EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
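Concretely, when CONFIG_PS3_USE_LPAR_ADDR is not set, the boot (rm) region is identity-mapped and only hotplug (r1) addresses are shifted by r1.offset = r1.base - rm.size. A worked example under assumed, illustrative sizes:

	/* Assume map.rm.size = 0x08000000 (128MB) and map.r1.base = 0x10000000,
	 * so map.r1.offset = 0x08000000. */
	ps3_mm_phys_to_lpar(0x01000000);	/* inside rm: returns 0x01000000 */
	ps3_mm_phys_to_lpar(0x09000000);	/* inside r1: returns 0x11000000 */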
148
149/**
150 * ps3_mm_vas_create - create the virtual address space
151 */
152
153void __init ps3_mm_vas_create(unsigned long* htab_size)
154{
155 int result;
156 unsigned long start_address;
157 unsigned long size;
158 unsigned long access_right;
159 unsigned long max_page_size;
160 unsigned long flags;
161
162 result = lv1_query_logical_partition_address_region_info(0,
163 &start_address, &size, &access_right, &max_page_size,
164 &flags);
165
166 if (result) {
167 DBG("%s:%d: lv1_query_logical_partition_address_region_info "
168 "failed: %s\n", __func__, __LINE__,
169 ps3_result(result));
170 goto fail;
171 }
172
173 if (max_page_size < PAGE_SHIFT_16M) {
174 DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
175 max_page_size);
176 goto fail;
177 }
178
179 BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
180 BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);
181
182 result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
183 2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
184 &map.vas_id, &map.htab_size);
185
186 if (result) {
187 DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
188 __func__, __LINE__, ps3_result(result));
189 goto fail;
190 }
191
192 result = lv1_select_virtual_address_space(map.vas_id);
193
194 if (result) {
195 DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
196 __func__, __LINE__, ps3_result(result));
197 goto fail;
198 }
199
200 *htab_size = map.htab_size;
201
202 debug_dump_map(&map);
203
204 return;
205
206fail:
207 panic("ps3_mm_vas_create failed");
208}
209
210/**
211 * ps3_mm_vas_destroy -
212 */
213
214void ps3_mm_vas_destroy(void)
215{
216 if (map.vas_id) {
217 lv1_select_virtual_address_space(0);
218 lv1_destruct_virtual_address_space(map.vas_id);
219 map.vas_id = 0;
220 }
221}
222
223/*============================================================================*/
224/* memory hotplug routines */
225/*============================================================================*/
226
227/**
228 * ps3_mm_region_create - create a memory region in the vas
229 * @r: pointer to a struct mem_region to accept initialized values
230 * @size: requested region size
231 *
232 * This implementation creates the region with the vas large page size.
233 * @size is rounded down to a multiple of the vas large page size.
234 */
235
236int ps3_mm_region_create(struct mem_region *r, unsigned long size)
237{
238 int result;
239 unsigned long muid;
240
241 r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
242
243 DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
244 DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
245 DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
246 (unsigned long)(size - r->size),
247 (size - r->size) / 1024 / 1024);
248
249 if (r->size == 0) {
250 DBG("%s:%d: size == 0\n", __func__, __LINE__);
251 result = -1;
252 goto zero_region;
253 }
254
255 result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
256 ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
257
258 if (result || r->base < map.rm.size) {
259 DBG("%s:%d: lv1_allocate_memory failed: %s\n",
260 __func__, __LINE__, ps3_result(result));
261 goto zero_region;
262 }
263
264 r->offset = r->base - map.rm.size;
265 return result;
266
267zero_region:
268 r->size = r->base = r->offset = 0;
269 return result;
270}
271
272/**
273 * ps3_mm_region_destroy - destroy a memory region
274 * @r: pointer to struct mem_region
275 */
276
277void ps3_mm_region_destroy(struct mem_region *r)
278{
279 if (r->base) {
280 lv1_release_memory(r->base);
281 r->size = r->base = r->offset = 0;
282 map.total = map.rm.size;
283 }
284}
285
286/**
287 * ps3_mm_add_memory - hot add memory
288 */
289
290static int __init ps3_mm_add_memory(void)
291{
292 int result;
293 unsigned long start_addr;
294 unsigned long start_pfn;
295 unsigned long nr_pages;
296
297 if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
298 return 0;
299
300 BUG_ON(!mem_init_done);
301
302 start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
303 start_pfn = start_addr >> PAGE_SHIFT;
304 nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
305
306 DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
307 __func__, __LINE__, start_addr, start_pfn, nr_pages);
308
309 result = add_memory(0, start_addr, map.r1.size);
310
311 if (result) {
312 DBG("%s:%d: add_memory failed: (%d)\n",
313 __func__, __LINE__, result);
314 return result;
315 }
316
317 result = online_pages(start_pfn, nr_pages);
318
319 if (result)
320 DBG("%s:%d: online_pages failed: (%d)\n",
321 __func__, __LINE__, result);
322
323 return result;
324}
325
326core_initcall(ps3_mm_add_memory);
327
328/*============================================================================*/
329/* dma routines */
330/*============================================================================*/
331
332/**
333 * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
334 * @r: pointer to dma region structure
335 * @lpar_addr: HV lpar address
336 */
337
338static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
339 unsigned long lpar_addr)
340{
341 BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
342 return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
343 : lpar_addr - map.r1.offset);
344}
345
346#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
347static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
348 int line)
349{
350 DBG("%s:%d: dev %u:%u\n", func, line, r->did.bus_id,
351 r->did.dev_id);
352 DBG("%s:%d: page_size %u\n", func, line, r->page_size);
353 DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
354 DBG("%s:%d: len %lxh\n", func, line, r->len);
355}
356
357/**
358 * dma_chunk - A chunk of dma pages mapped by the io controller.
359 * @region: The dma region that owns this chunk.
360 * @lpar_addr: Starting lpar address of the area to map.
361 * @bus_addr: Starting ioc bus address of the area to map.
362 * @len: Length in bytes of the area to map.
363 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
364 * list of all chunks owned by the region.
365 *
366 * This implementation uses a very simple dma page manager
367 * based on the dma_chunk structure. This scheme assumes
368 * that all drivers use very well behaved dma ops.
369 */
370
371struct dma_chunk {
372 struct ps3_dma_region *region;
373 unsigned long lpar_addr;
374 unsigned long bus_addr;
375 unsigned long len;
376 struct list_head link;
377 unsigned int usage_count;
378};
379
380#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
381static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
382 int line)
383{
384 DBG("%s:%d: r.dev %u:%u\n", func, line,
385 c->region->did.bus_id, c->region->did.dev_id);
386 DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
387 DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
388 DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
389 DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
390 DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
391 DBG("%s:%d: c.len %lxh\n", func, line, c->len);
392}
393
394static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
395 unsigned long bus_addr, unsigned long len)
396{
397 struct dma_chunk *c;
398 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
399 unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
400
401 list_for_each_entry(c, &r->chunk_list.head, link) {
402 /* intersection */
403 if (aligned_bus >= c->bus_addr
404 && aligned_bus < c->bus_addr + c->len
405 && aligned_bus + aligned_len <= c->bus_addr + c->len) {
406 return c;
407 }
408 /* below */
409 if (aligned_bus + aligned_len <= c->bus_addr) {
410 continue;
411 }
412 /* above */
413 if (aligned_bus >= c->bus_addr + c->len) {
414 continue;
415 }
416
417 /* we don't handle the multi-chunk case for now */
418
419 dma_dump_chunk(c);
420 BUG();
421 }
422 return NULL;
423}
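dma_find_chunk() widens each request to whole region pages before testing containment, so lookups are page-granular. A worked example under an assumed page size:

	/* Assuming r->page_size == PS3_DMA_16M (a shift of 24, i.e. 16MB pages):
	 *   dma_find_chunk(r, 0x01234567, 0x100)
	 *   => aligned_bus = 0x01000000, aligned_len = 0x01000000,
	 *      which matches a chunk covering [0x01000000, 0x02000000). */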
424
425static int dma_free_chunk(struct dma_chunk *c)
426{
427 int result = 0;
428
429 if (c->bus_addr) {
430 result = lv1_unmap_device_dma_region(c->region->did.bus_id,
431 c->region->did.dev_id, c->bus_addr, c->len);
432 BUG_ON(result);
433 }
434
435 kfree(c);
436 return result;
437}
438
439/**
440 * dma_map_pages - Maps dma pages into the io controller bus address space.
441 * @r: Pointer to a struct ps3_dma_region.
442 * @phys_addr: Starting physical address of the area to map.
443 * @len: Length in bytes of the area to map.
444 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
445 *
446 * This is the lowest level dma mapping routine, and is the one that will
447 * make the HV call to add the pages into the io controller address space.
448 */
449
450static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
451 unsigned long len, struct dma_chunk **c_out)
452{
453 int result;
454 struct dma_chunk *c;
455
456 c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);
457
458 if (!c) {
459 result = -ENOMEM;
460 goto fail_alloc;
461 }
462
463 c->region = r;
464 c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
465 c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
466 c->len = len;
467
468 result = lv1_map_device_dma_region(c->region->did.bus_id,
469 c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
470 0xf800000000000000UL);
471
472 if (result) {
473 DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
474 __func__, __LINE__, ps3_result(result));
475 goto fail_map;
476 }
477
478 list_add(&c->link, &r->chunk_list.head);
479
480 *c_out = c;
481 return 0;
482
483fail_map:
484 kfree(c);
485fail_alloc:
486 *c_out = NULL;
487 DBG(" <- %s:%d\n", __func__, __LINE__);
488 return result;
489}
490
491/**
492 * dma_region_create - Create a device dma region.
493 * @r: Pointer to a struct ps3_dma_region.
494 *
495 * This is the lowest level dma region create routine, and is the one that
496 * will make the HV call to create the region.
497 */
498
499static int dma_region_create(struct ps3_dma_region* r)
500{
501 int result;
502
503 r->len = _ALIGN_UP(map.total, 1 << r->page_size);
504 INIT_LIST_HEAD(&r->chunk_list.head);
505 spin_lock_init(&r->chunk_list.lock);
506
507 result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
508 r->len, r->page_size, r->region_type, &r->bus_addr);
509
510 dma_dump_region(r);
511
512 if (result) {
513 DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
514 __func__, __LINE__, ps3_result(result));
515 r->len = r->bus_addr = 0;
516 }
517
518 return result;
519}
520
521/**
522 * dma_region_free - Free a device dma region.
523 * @r: Pointer to a struct ps3_dma_region.
524 *
525 * This is the lowest level dma region free routine, and is the one that
526 * will make the HV call to free the region.
527 */
528
529static int dma_region_free(struct ps3_dma_region* r)
530{
531 int result;
532 struct dma_chunk *c;
533 struct dma_chunk *tmp;
534
535 list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
536 list_del(&c->link);
537 dma_free_chunk(c);
538 }
539
540 result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
541 r->bus_addr);
542
543 if (result)
544 DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
545 __func__, __LINE__, ps3_result(result));
546
547 r->len = r->bus_addr = 0;
548
549 return result;
550}
551
552/**
553 * dma_map_area - Map an area of memory into a device dma region.
554 * @r: Pointer to a struct ps3_dma_region.
555 * @virt_addr: Starting virtual address of the area to map.
556 * @len: Length in bytes of the area to map.
557 * @bus_addr: A pointer to return the starting ioc bus address of the area to
558 * map.
559 *
560 * This is the common dma mapping routine.
561 */
562
563static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
564 unsigned long len, unsigned long *bus_addr)
565{
566 int result;
567 unsigned long flags;
568 struct dma_chunk *c;
569 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
570 : virt_addr;
571
572 *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
573
574 if (!USE_DYNAMIC_DMA) {
575 unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
576 DBG(" -> %s:%d\n", __func__, __LINE__);
577 DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
578 virt_addr);
579 DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
580 phys_addr);
581 DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
582 lpar_addr);
583 DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
584 DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__,
585 *bus_addr, len);
586 }
587
588 spin_lock_irqsave(&r->chunk_list.lock, flags);
589 c = dma_find_chunk(r, *bus_addr, len);
590
591 if (c) {
592 c->usage_count++;
593 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
594 return 0;
595 }
596
597 result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
598 _ALIGN_UP(len, 1 << r->page_size), &c);
599
600 if (result) {
601 *bus_addr = 0;
602 DBG("%s:%d: dma_map_pages failed (%d)\n",
603 __func__, __LINE__, result);
604 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
605 return result;
606 }
607
608 c->usage_count = 1;
609
610 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
611 return result;
612}
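
/*
 * Worked example (illustrative, not part of the original source): with
 * 16M dma pages (r->page_size == 24), a request for phys_addr 0x0a001000
 * and len 0x2000 is widened by the alignment above to
 *
 *	_ALIGN_DOWN(0x0a001000, 1 << 24) == 0x0a000000
 *	_ALIGN_UP(0x2000, 1 << 24)       == 0x01000000 (one 16M page)
 *
 * so a single whole page is mapped, and dma_find_chunk() satisfies later
 * requests that fall inside it without another HV call.
 */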
613
614/**
615 * dma_unmap_area - Unmap an area of memory from a device dma region.
616 * @r: Pointer to a struct ps3_dma_region.
617 * @bus_addr: The starting ioc bus address of the area to unmap.
618 * @len: Length in bytes of the area to unmap.
619 *
620 * This is the common dma unmap routine.
621 */
622
623int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
624 unsigned long len)
625{
626 unsigned long flags;
627 struct dma_chunk *c;
628
629 spin_lock_irqsave(&r->chunk_list.lock, flags);
630 c = dma_find_chunk(r, bus_addr, len);
631
632 if (!c) {
633 unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
634 1 << r->page_size);
635 unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
636 DBG("%s:%d: not found: bus_addr %lxh\n",
637 __func__, __LINE__, bus_addr);
638 DBG("%s:%d: not found: len %lxh\n",
639 __func__, __LINE__, len);
640 DBG("%s:%d: not found: aligned_bus %lxh\n",
641 __func__, __LINE__, aligned_bus);
642 DBG("%s:%d: not found: aligned_len %lxh\n",
643 __func__, __LINE__, aligned_len);
644 BUG();
645 }
646
647 c->usage_count--;
648
649 if (!c->usage_count) {
650 list_del(&c->link);
651 dma_free_chunk(c);
652 }
653
654 spin_unlock_irqrestore(&r->chunk_list.lock, flags);
655 return 0;
656}
657
658/**
659 * dma_region_create_linear - Set up a linear dma mapping for a device.
660 * @r: Pointer to a struct ps3_dma_region.
661 *
662 * This routine creates an HV dma region for the device and maps all available
663 * ram into the io controller bus address space.
664 */
665
666static int dma_region_create_linear(struct ps3_dma_region *r)
667{
668 int result;
669 unsigned long tmp;
670
671 /* force 16M dma pages for linear mapping */
672
673 if (r->page_size != PS3_DMA_16M) {
674 pr_info("%s:%d: forcing 16M pages for linear map\n",
675 __func__, __LINE__);
676 r->page_size = PS3_DMA_16M;
677 }
678
679 result = dma_region_create(r);
680 BUG_ON(result);
681
682 result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
683 BUG_ON(result);
684
685 if (USE_LPAR_ADDR)
686 result = dma_map_area(r, map.r1.base, map.r1.size,
687 &tmp);
688 else
689 result = dma_map_area(r, map.rm.size, map.r1.size,
690 &tmp);
691
692 BUG_ON(result);
693
694 return result;
695}
696
697/**
698 * dma_region_free_linear - Free a linear dma mapping for a device.
699 * @r: Pointer to a struct ps3_dma_region.
700 *
701 * This routine will unmap all mapped areas and free the HV dma region.
702 */
703
704static int dma_region_free_linear(struct ps3_dma_region *r)
705{
706 int result;
707
708 result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
709 BUG_ON(result);
710
711 result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
712 map.r1.size);
713 BUG_ON(result);
714
715 result = dma_region_free(r);
716 BUG_ON(result);
717
718 return result;
719}
720
721/**
722 * dma_map_area_linear - Map an area of memory into a device dma region.
723 * @r: Pointer to a struct ps3_dma_region.
724 * @virt_addr: Starting virtual address of the area to map.
725 * @len: Length in bytes of the area to map.
726 * @bus_addr: A pointer to return the starting ioc bus address of the area to
727 * map.
728 *
729 * This routine just returns the corresponding bus address. Actual mapping
730 * occurs in dma_region_create_linear().
731 */
732
733static int dma_map_area_linear(struct ps3_dma_region *r,
734 unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
735{
736 unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
737 : virt_addr;
738 *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
739 return 0;
740}
741
742/**
743 * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
744 * @r: Pointer to a struct ps3_dma_region.
745 * @bus_addr: The starting ioc bus address of the area to unmap.
746 * @len: Length in bytes of the area to unmap.
747 *
748 * This routine does nothing. Unmapping occurs in dma_region_free_linear().
749 */
750
751static int dma_unmap_area_linear(struct ps3_dma_region *r,
752 unsigned long bus_addr, unsigned long len)
753{
754 return 0;
755}
756
757int ps3_dma_region_create(struct ps3_dma_region *r)
758{
759 return (USE_DYNAMIC_DMA)
760 ? dma_region_create(r)
761 : dma_region_create_linear(r);
762}
763
764int ps3_dma_region_free(struct ps3_dma_region *r)
765{
766 return (USE_DYNAMIC_DMA)
767 ? dma_region_free(r)
768 : dma_region_free_linear(r);
769}
770
771int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
772 unsigned long len, unsigned long *bus_addr)
773{
774 return (USE_DYNAMIC_DMA)
775 ? dma_map_area(r, virt_addr, len, bus_addr)
776 : dma_map_area_linear(r, virt_addr, len, bus_addr);
777}
778
779int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
780 unsigned long len)
781{
782 return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
783 : dma_unmap_area_linear(r, bus_addr, len);
784}
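
/*
 * Hedged usage sketch (illustrative, not part of the original source): how
 * a hypothetical driver might exercise the interface above, assuming its
 * struct ps3_dma_region has already been filled in from the repository
 * (did, page_size, region_type).
 */
#if 0
static int example_dma_usage(struct ps3_dma_region *r, void *buf,
	unsigned long len)
{
	int result;
	unsigned long bus_addr;

	result = ps3_dma_region_create(r);

	if (result)
		return result;

	result = ps3_dma_map(r, (unsigned long)buf, len, &bus_addr);

	if (!result) {
		/* ... program the device with bus_addr here ... */
		ps3_dma_unmap(r, bus_addr, len);
	}

	ps3_dma_region_free(r);
	return result;
}
#endif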
785
786/*============================================================================*/
787/* system startup routines */
788/*============================================================================*/
789
790/**
791 * ps3_mm_init - initialize the address space state variables
792 */
793
794void __init ps3_mm_init(void)
795{
796 int result;
797
798 DBG(" -> %s:%d\n", __func__, __LINE__);
799
800 result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
801 &map.total);
802
803 if (result)
804 panic("ps3_repository_read_mm_info() failed");
805
806 map.rm.offset = map.rm.base;
807 map.vas_id = map.htab_size = 0;
808
809 /* this implementation assumes map.rm.base is zero */
810
811 BUG_ON(map.rm.base);
812 BUG_ON(!map.rm.size);
813
814 lmb_add(map.rm.base, map.rm.size);
815 lmb_analyze();
816
817 /* arrange to do this in ps3_mm_add_memory */
818 ps3_mm_region_create(&map.r1, map.total - map.rm.size);
819
820 DBG(" <- %s:%d\n", __func__, __LINE__);
821}
822
823/**
824 * ps3_mm_shutdown - final cleanup of address space
825 */
826
827void ps3_mm_shutdown(void)
828{
829 ps3_mm_region_destroy(&map.r1);
830 map.total = map.rm.size;
831}
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
new file mode 100644
index 000000000000..58358305dc10
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -0,0 +1,259 @@
1/*
2 * PS3 'Other OS' area data.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/io.h>
23
24#include <asm/lmb.h>
25#include <asm/ps3.h>
26
27#include "platform.h"
28
29enum {
30	OS_AREA_SEGMENT_SIZE = 0x200,
31};
32
33enum {
34 HEADER_LDR_FORMAT_RAW = 0,
35 HEADER_LDR_FORMAT_GZIP = 1,
36};
37
38/**
39 * struct os_area_header - os area header segment.
40 * @magic_num: Always 'cell_ext_os_area'.
41 * @hdr_version: Header format version number.
42 * @os_area_offset: Starting segment number of os image area.
43 * @ldr_area_offset: Starting segment number of bootloader image area.
44 * @ldr_format: HEADER_LDR_FORMAT flag.
45 * @ldr_size: Size of bootloader image in bytes.
46 *
47 * Note that the docs refer to area offsets. These are offsets in units of
48 * segments from the start of the os area (top of the header). These are
49 * better thought of as segment numbers. The os image area of the os area
50 * is reserved for the os image.
51 */
52
53struct os_area_header {
54 s8 magic_num[16];
55 u32 hdr_version;
56 u32 os_area_offset;
57 u32 ldr_area_offset;
58 u32 _reserved_1;
59 u32 ldr_format;
60 u32 ldr_size;
61 u32 _reserved_2[6];
62} __attribute__ ((packed));
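
/*
 * Worked example (illustrative, not part of the original source): with
 * OS_AREA_SEGMENT_SIZE == 0x200 the header occupies segment 0 and the
 * params segment follows it, so for an os area at lpar address 'base':
 *
 *	header = base
 *	params = base + 1 * OS_AREA_SEGMENT_SIZE
 *	ldr    = base + ldr_area_offset * OS_AREA_SEGMENT_SIZE
 *
 * which matches the header/params addressing in ps3_os_area_init() below.
 */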
63
64enum {
65 PARAM_BOOT_FLAG_GAME_OS = 0,
66 PARAM_BOOT_FLAG_OTHER_OS = 1,
67};
68
69enum {
70 PARAM_AV_MULTI_OUT_NTSC = 0,
71 PARAM_AV_MULTI_OUT_PAL_RGB = 1,
72 PARAM_AV_MULTI_OUT_PAL_YCBCR = 2,
73 PARAM_AV_MULTI_OUT_SECAM = 3,
74};
75
76enum {
77 PARAM_CTRL_BUTTON_O_IS_YES = 0,
78 PARAM_CTRL_BUTTON_X_IS_YES = 1,
79};
80
81/**
82 * struct os_area_params - os area params segment.
83 * @boot_flag: User preference of operating system, PARAM_BOOT_FLAG flag.
84 * @num_params: Number of params in this (params) segment.
85 * @rtc_diff: Difference in seconds between 1970 and the ps3 rtc value.
86 * @av_multi_out: User preference of AV output, PARAM_AV_MULTI_OUT flag.
87 * @ctrl_button: User preference of controller button config, PARAM_CTRL_BUTTON
88 * flag.
89 * @static_ip_addr: User preference of static IP address.
90 * @network_mask: User preference of static network mask.
91 * @default_gateway: User preference of static default gateway.
92 * @dns_primary: User preference of static primary dns server.
93 * @dns_secondary: User preference of static secondary dns server.
94 *
95 * User preference of zero for static_ip_addr means use dhcp.
96 */
97
98struct os_area_params {
99 u32 boot_flag;
100 u32 _reserved_1[3];
101 u32 num_params;
102 u32 _reserved_2[3];
103 /* param 0 */
104 s64 rtc_diff;
105 u8 av_multi_out;
106 u8 ctrl_button;
107 u8 _reserved_3[6];
108 /* param 1 */
109 u8 static_ip_addr[4];
110 u8 network_mask[4];
111 u8 default_gateway[4];
112 u8 _reserved_4[4];
113 /* param 2 */
114 u8 dns_primary[4];
115 u8 dns_secondary[4];
116 u8 _reserved_5[8];
117} __attribute__ ((packed));
118
119/**
120 * struct saved_params - Static working copies of data from the 'Other OS' area.
121 *
122 * For the convenience of the guest, the HV makes a copy of the 'Other OS' area
123 * in flash to a high address in the boot memory region and then puts that RAM
124 * address and the byte count into the repository for retrieval by the guest.
125 * We copy the data we want into a static variable and allow the memory setup
126 * by the HV to be claimed by the lmb manager.
127 */
128
129struct saved_params {
130 /* param 0 */
131 s64 rtc_diff;
132 unsigned int av_multi_out;
133 unsigned int ctrl_button;
134 /* param 1 */
135 u8 static_ip_addr[4];
136 u8 network_mask[4];
137 u8 default_gateway[4];
138 /* param 2 */
139 u8 dns_primary[4];
140 u8 dns_secondary[4];
141} static saved_params;
142
143#define dump_header(_a) _dump_header(_a, __func__, __LINE__)
144static void _dump_header(const struct os_area_header __iomem *h, const char* func,
145 int line)
146{
147 pr_debug("%s:%d: h.magic_num: '%s'\n", func, line,
148 h->magic_num);
149 pr_debug("%s:%d: h.hdr_version: %u\n", func, line,
150 h->hdr_version);
151 pr_debug("%s:%d: h.os_area_offset: %u\n", func, line,
152 h->os_area_offset);
153 pr_debug("%s:%d: h.ldr_area_offset: %u\n", func, line,
154 h->ldr_area_offset);
155 pr_debug("%s:%d: h.ldr_format: %u\n", func, line,
156 h->ldr_format);
157 pr_debug("%s:%d: h.ldr_size: %xh\n", func, line,
158 h->ldr_size);
159}
160
161#define dump_params(_a) _dump_params(_a, __func__, __LINE__)
162static void _dump_params(const struct os_area_params __iomem *p, const char* func,
163 int line)
164{
165 pr_debug("%s:%d: p.boot_flag: %u\n", func, line, p->boot_flag);
166 pr_debug("%s:%d: p.num_params: %u\n", func, line, p->num_params);
167 pr_debug("%s:%d: p.rtc_diff %ld\n", func, line, p->rtc_diff);
168 pr_debug("%s:%d: p.av_multi_out %u\n", func, line, p->av_multi_out);
169 pr_debug("%s:%d: p.ctrl_button: %u\n", func, line, p->ctrl_button);
170 pr_debug("%s:%d: p.static_ip_addr: %u.%u.%u.%u\n", func, line,
171 p->static_ip_addr[0], p->static_ip_addr[1],
172 p->static_ip_addr[2], p->static_ip_addr[3]);
173 pr_debug("%s:%d: p.network_mask: %u.%u.%u.%u\n", func, line,
174 p->network_mask[0], p->network_mask[1],
175 p->network_mask[2], p->network_mask[3]);
176 pr_debug("%s:%d: p.default_gateway: %u.%u.%u.%u\n", func, line,
177 p->default_gateway[0], p->default_gateway[1],
178 p->default_gateway[2], p->default_gateway[3]);
179 pr_debug("%s:%d: p.dns_primary: %u.%u.%u.%u\n", func, line,
180 p->dns_primary[0], p->dns_primary[1],
181 p->dns_primary[2], p->dns_primary[3]);
182 pr_debug("%s:%d: p.dns_secondary: %u.%u.%u.%u\n", func, line,
183 p->dns_secondary[0], p->dns_secondary[1],
184 p->dns_secondary[2], p->dns_secondary[3]);
185}
186
187static int __init verify_header(const struct os_area_header *header)
188{
189 if (memcmp(header->magic_num, "cell_ext_os_area", 16)) {
190 pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
191 return -1;
192 }
193
194 if (header->hdr_version < 1) {
195 pr_debug("%s:%d hdr_version failed\n", __func__, __LINE__);
196 return -1;
197 }
198
199 if (header->os_area_offset > header->ldr_area_offset) {
200 pr_debug("%s:%d offsets failed\n", __func__, __LINE__);
201 return -1;
202 }
203
204 return 0;
205}
206
207int __init ps3_os_area_init(void)
208{
209 int result;
210 u64 lpar_addr;
211 unsigned int size;
212 struct os_area_header *header;
213 struct os_area_params *params;
214
215 result = ps3_repository_read_boot_dat_info(&lpar_addr, &size);
216
217 if (result) {
218 pr_debug("%s:%d ps3_repository_read_boot_dat_info failed\n",
219 __func__, __LINE__);
220 return result;
221 }
222
223 header = (struct os_area_header *)__va(lpar_addr);
224 params = (struct os_area_params *)__va(lpar_addr + OS_AREA_SEGMENT_SIZE);
225
226 result = verify_header(header);
227
228 if (result) {
229 pr_debug("%s:%d verify_header failed\n", __func__, __LINE__);
230 dump_header(header);
231 return -EIO;
232 }
233
234 dump_header(header);
235 dump_params(params);
236
237 saved_params.rtc_diff = params->rtc_diff;
238 saved_params.av_multi_out = params->av_multi_out;
239 saved_params.ctrl_button = params->ctrl_button;
240 memcpy(saved_params.static_ip_addr, params->static_ip_addr, 4);
241 memcpy(saved_params.network_mask, params->network_mask, 4);
242 memcpy(saved_params.default_gateway, params->default_gateway, 4);
243	memcpy(saved_params.dns_primary, params->dns_primary, 4);
	memcpy(saved_params.dns_secondary, params->dns_secondary, 4);
244
245 return result;
246}
247
248/**
249 * ps3_os_area_rtc_diff - Returns the ps3 rtc diff value.
250 *
251 * The ps3 rtc maintains a value that approximates seconds since
252 * 2000-01-01 00:00:00 UTC. Returns the exact number of seconds from 1970 to
253 * 2000 when saved_params.rtc_diff has not been properly set up.
254 */
255
256u64 ps3_os_area_rtc_diff(void)
257{
258 return saved_params.rtc_diff ? saved_params.rtc_diff : 946684800UL;
259}
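
/*
 * Illustrative note (assumption, not from the original source): callers in
 * the platform time code can recover Unix time from the raw rtc value as
 *
 *	time_in_seconds = rtc_value + ps3_os_area_rtc_diff();
 *
 * the 946684800UL fallback is exactly the number of seconds from
 * 1970-01-01 to 2000-01-01, so an unset rtc_diff still yields an
 * epoch-relative time.
 */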
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
new file mode 100644
index 000000000000..23b111bea9d0
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -0,0 +1,68 @@
1/*
2 * PS3 platform declarations.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#if !defined(_PS3_PLATFORM_H)
22#define _PS3_PLATFORM_H
23
24#include <linux/rtc.h>
25
26/* htab */
27
28void __init ps3_hpte_init(unsigned long htab_size);
29void __init ps3_map_htab(void);
30
31/* mm */
32
33void __init ps3_mm_init(void);
34void __init ps3_mm_vas_create(unsigned long* htab_size);
35void ps3_mm_vas_destroy(void);
36void ps3_mm_shutdown(void);
37
38/* irq */
39
40void ps3_init_IRQ(void);
41void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
42
43/* smp */
44
45void smp_init_ps3(void);
46void ps3_smp_cleanup_cpu(int cpu);
47
48/* time */
49
50void __init ps3_calibrate_decr(void);
51unsigned long __init ps3_get_boot_time(void);
52void ps3_get_rtc_time(struct rtc_time *time);
53int ps3_set_rtc_time(struct rtc_time *time);
54
55/* os area */
56
57int __init ps3_os_area_init(void);
58u64 ps3_os_area_rtc_diff(void);
59
60/* spu */
61
62#if defined(CONFIG_SPU_BASE)
63void ps3_spu_set_platform (void);
64#else
65static inline void ps3_spu_set_platform (void) {}
66#endif
67
68#endif
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
new file mode 100644
index 000000000000..273a0d621bdd
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -0,0 +1,840 @@
1/*
2 * PS3 repository routines.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <asm/ps3.h>
22#include <asm/lv1call.h>
23
24enum ps3_vendor_id {
25 PS3_VENDOR_ID_NONE = 0,
26 PS3_VENDOR_ID_SONY = 0x8000000000000000UL,
27};
28
29enum ps3_lpar_id {
30 PS3_LPAR_ID_CURRENT = 0,
31 PS3_LPAR_ID_PME = 1,
32};
33
34#define dump_field(_a, _b) _dump_field(_a, _b, __func__, __LINE__)
35static void _dump_field(const char *hdr, u64 n, const char* func, int line)
36{
37#if defined(DEBUG)
38 char s[16];
39 const char *const in = (const char *)&n;
40 unsigned int i;
41
42 for (i = 0; i < 8; i++)
43 s[i] = (in[i] <= 126 && in[i] >= 32) ? in[i] : '.';
44 s[i] = 0;
45
46 pr_debug("%s:%d: %s%016lx : %s\n", func, line, hdr, n, s);
47#endif
48}
49
50#define dump_node_name(_a, _b, _c, _d, _e) \
51 _dump_node_name(_a, _b, _c, _d, _e, __func__, __LINE__)
52static void _dump_node_name (unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
53 u64 n4, const char* func, int line)
54{
55 pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
56 _dump_field("n1: ", n1, func, line);
57 _dump_field("n2: ", n2, func, line);
58 _dump_field("n3: ", n3, func, line);
59 _dump_field("n4: ", n4, func, line);
60}
61
62#define dump_node(_a, _b, _c, _d, _e, _f, _g) \
63 _dump_node(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
64static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
65 u64 v1, u64 v2, const char* func, int line)
66{
67 pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
68 _dump_field("n1: ", n1, func, line);
69 _dump_field("n2: ", n2, func, line);
70 _dump_field("n3: ", n3, func, line);
71 _dump_field("n4: ", n4, func, line);
72 pr_debug("%s:%d: v1: %016lx\n", func, line, v1);
73 pr_debug("%s:%d: v2: %016lx\n", func, line, v2);
74}
75
76/**
77 * make_first_field - Make the first field of a repository node name.
78 * @text: Text portion of the field.
79 * @index: Numeric index portion of the field. Use zero for 'don't care'.
80 *
81 * This routine sets the vendor id to zero (non-vendor specific).
82 * Returns field value.
83 */
84
85static u64 make_first_field(const char *text, u64 index)
86{
87 u64 n;
88
89 strncpy((char *)&n, text, 8);
90 return PS3_VENDOR_ID_NONE + (n >> 32) + index;
91}
92
93/**
94 * make_field - Make subsequent fields of a repository node name.
95 * @text: Text portion of the field. Use "" for 'don't care'.
96 * @index: Numeric index portion of the field. Use zero for 'don't care'.
97 *
98 * Returns field value.
99 */
100
101static u64 make_field(const char *text, u64 index)
102{
103 u64 n;
104
105 strncpy((char *)&n, text, 8);
106 return n + index;
107}
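
/*
 * Worked example (illustrative, not part of the original source): on this
 * big-endian machine strncpy() puts the text in the most significant bytes
 * of n, so
 *
 *	make_first_field("bus", 1) == 0x0000000062757301
 *	make_field("type", 0)      == 0x7479706500000000
 *
 * the first field is shifted down 32 bits, leaving the high word free for
 * a vendor id.
 */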
108
109/**
110 * read_node - Read a repository node from raw fields.
111 * @n1: First field of node name.
112 * @n2: Second field of node name. Use zero for 'don't care'.
113 * @n3: Third field of node name. Use zero for 'don't care'.
114 * @n4: Fourth field of node name. Use zero for 'don't care'.
115 * @v1: First repository value (high word).
116 * @v2: Second repository value (low word). Optional parameter, use zero
117 * for 'don't care'.
118 */
119
120static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
121 u64 *_v1, u64 *_v2)
122{
123 int result;
124 u64 v1;
125 u64 v2;
126
127 if (lpar_id == PS3_LPAR_ID_CURRENT) {
128 u64 id;
129 lv1_get_logical_partition_id(&id);
130 lpar_id = id;
131 }
132
133 result = lv1_get_repository_node_value(lpar_id, n1, n2, n3, n4, &v1,
134 &v2);
135
136 if (result) {
137 pr_debug("%s:%d: lv1_get_repository_node_value failed: %s\n",
138 __func__, __LINE__, ps3_result(result));
139 dump_node_name(lpar_id, n1, n2, n3, n4);
140 return result;
141 }
142
143 dump_node(lpar_id, n1, n2, n3, n4, v1, v2);
144
145 if (_v1)
146 *_v1 = v1;
147 if (_v2)
148 *_v2 = v2;
149
150 if (v1 && !_v1)
151 pr_debug("%s:%d: warning: discarding non-zero v1: %016lx\n",
152 __func__, __LINE__, v1);
153 if (v2 && !_v2)
154 pr_debug("%s:%d: warning: discarding non-zero v2: %016lx\n",
155 __func__, __LINE__, v2);
156
157 return result;
158}
159
160int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
161 u64 *value)
162{
163 return read_node(PS3_LPAR_ID_PME,
164 make_first_field("bus", bus_index),
165 make_field(bus_str, 0),
166 0, 0,
167 value, 0);
168}
169
170int ps3_repository_read_bus_id(unsigned int bus_index, unsigned int *bus_id)
171{
172 int result;
173 u64 v1;
174 u64 v2; /* unused */
175
176 result = read_node(PS3_LPAR_ID_PME,
177 make_first_field("bus", bus_index),
178 make_field("id", 0),
179 0, 0,
180 &v1, &v2);
181 *bus_id = v1;
182 return result;
183}
184
185int ps3_repository_read_bus_type(unsigned int bus_index,
186 enum ps3_bus_type *bus_type)
187{
188 int result;
189 u64 v1;
190
191 result = read_node(PS3_LPAR_ID_PME,
192 make_first_field("bus", bus_index),
193 make_field("type", 0),
194 0, 0,
195 &v1, 0);
196 *bus_type = v1;
197 return result;
198}
199
200int ps3_repository_read_bus_num_dev(unsigned int bus_index,
201 unsigned int *num_dev)
202{
203 int result;
204 u64 v1;
205
206 result = read_node(PS3_LPAR_ID_PME,
207 make_first_field("bus", bus_index),
208 make_field("num_dev", 0),
209 0, 0,
210 &v1, 0);
211 *num_dev = v1;
212 return result;
213}
214
215int ps3_repository_read_dev_str(unsigned int bus_index,
216 unsigned int dev_index, const char *dev_str, u64 *value)
217{
218 return read_node(PS3_LPAR_ID_PME,
219 make_first_field("bus", bus_index),
220 make_field("dev", dev_index),
221 make_field(dev_str, 0),
222 0,
223 value, 0);
224}
225
226int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
227 unsigned int *dev_id)
228{
229 int result;
230 u64 v1;
231
232 result = read_node(PS3_LPAR_ID_PME,
233 make_first_field("bus", bus_index),
234 make_field("dev", dev_index),
235 make_field("id", 0),
236 0,
237 &v1, 0);
238 *dev_id = v1;
239 return result;
240}
241
242int ps3_repository_read_dev_type(unsigned int bus_index,
243 unsigned int dev_index, enum ps3_dev_type *dev_type)
244{
245 int result;
246 u64 v1;
247
248 result = read_node(PS3_LPAR_ID_PME,
249 make_first_field("bus", bus_index),
250 make_field("dev", dev_index),
251 make_field("type", 0),
252 0,
253 &v1, 0);
254 *dev_type = v1;
255 return result;
256}
257
258int ps3_repository_read_dev_intr(unsigned int bus_index,
259 unsigned int dev_index, unsigned int intr_index,
260 unsigned int *intr_type, unsigned int* interrupt_id)
261{
262 int result;
263 u64 v1;
264 u64 v2;
265
266 result = read_node(PS3_LPAR_ID_PME,
267 make_first_field("bus", bus_index),
268 make_field("dev", dev_index),
269 make_field("intr", intr_index),
270 0,
271 &v1, &v2);
272 *intr_type = v1;
273 *interrupt_id = v2;
274 return result;
275}
276
277int ps3_repository_read_dev_reg_type(unsigned int bus_index,
278 unsigned int dev_index, unsigned int reg_index, unsigned int *reg_type)
279{
280 int result;
281 u64 v1;
282
283 result = read_node(PS3_LPAR_ID_PME,
284 make_first_field("bus", bus_index),
285 make_field("dev", dev_index),
286 make_field("reg", reg_index),
287 make_field("type", 0),
288 &v1, 0);
289 *reg_type = v1;
290 return result;
291}
292
293int ps3_repository_read_dev_reg_addr(unsigned int bus_index,
294 unsigned int dev_index, unsigned int reg_index, u64 *bus_addr, u64 *len)
295{
296 return read_node(PS3_LPAR_ID_PME,
297 make_first_field("bus", bus_index),
298 make_field("dev", dev_index),
299 make_field("reg", reg_index),
300 make_field("data", 0),
301 bus_addr, len);
302}
303
304int ps3_repository_read_dev_reg(unsigned int bus_index,
305 unsigned int dev_index, unsigned int reg_index, unsigned int *reg_type,
306 u64 *bus_addr, u64 *len)
307{
308 int result = ps3_repository_read_dev_reg_type(bus_index, dev_index,
309 reg_index, reg_type);
310 return result ? result
311 : ps3_repository_read_dev_reg_addr(bus_index, dev_index,
312 reg_index, bus_addr, len);
313}
314
315#if defined(DEBUG)
316int ps3_repository_dump_resource_info(unsigned int bus_index,
317 unsigned int dev_index)
318{
319 int result = 0;
320 unsigned int res_index;
321
322 pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
323 bus_index, dev_index);
324
325 for (res_index = 0; res_index < 10; res_index++) {
326 enum ps3_interrupt_type intr_type;
327 unsigned int interrupt_id;
328
329 result = ps3_repository_read_dev_intr(bus_index, dev_index,
330 res_index, &intr_type, &interrupt_id);
331
332 if (result) {
333 if (result != LV1_NO_ENTRY)
334 pr_debug("%s:%d ps3_repository_read_dev_intr"
335 " (%u:%u) failed\n", __func__, __LINE__,
336 bus_index, dev_index);
337 break;
338 }
339
340 pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
341 __func__, __LINE__, bus_index, dev_index, intr_type,
342 interrupt_id);
343 }
344
345 for (res_index = 0; res_index < 10; res_index++) {
346 enum ps3_region_type reg_type;
347 u64 bus_addr;
348 u64 len;
349
350 result = ps3_repository_read_dev_reg(bus_index, dev_index,
351 res_index, &reg_type, &bus_addr, &len);
352
353 if (result) {
354 if (result != LV1_NO_ENTRY)
355 pr_debug("%s:%d ps3_repository_read_dev_reg"
356 " (%u:%u) failed\n", __func__, __LINE__,
357 bus_index, dev_index);
358 break;
359 }
360
361 pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n",
362 __func__, __LINE__, bus_index, dev_index, reg_type,
363 bus_addr, len);
364 }
365
366 pr_debug(" <- %s:%d\n", __func__, __LINE__);
367 return result;
368}
369
370static int dump_device_info(unsigned int bus_index, unsigned int num_dev)
371{
372 int result = 0;
373 unsigned int dev_index;
374
375 pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, bus_index);
376
377 for (dev_index = 0; dev_index < num_dev; dev_index++) {
378 enum ps3_dev_type dev_type;
379 unsigned int dev_id;
380
381 result = ps3_repository_read_dev_type(bus_index, dev_index,
382 &dev_type);
383
384 if (result) {
385 pr_debug("%s:%d ps3_repository_read_dev_type"
386 " (%u:%u) failed\n", __func__, __LINE__,
387 bus_index, dev_index);
388 break;
389 }
390
391 result = ps3_repository_read_dev_id(bus_index, dev_index,
392 &dev_id);
393
394 if (result) {
395 pr_debug("%s:%d ps3_repository_read_dev_id"
396 " (%u:%u) failed\n", __func__, __LINE__,
397 bus_index, dev_index);
398 continue;
399 }
400
401 pr_debug("%s:%d (%u:%u): dev_type %u, dev_id %u\n", __func__,
402 __LINE__, bus_index, dev_index, dev_type, dev_id);
403
404 ps3_repository_dump_resource_info(bus_index, dev_index);
405 }
406
407 pr_debug(" <- %s:%d\n", __func__, __LINE__);
408 return result;
409}
410
411int ps3_repository_dump_bus_info(void)
412{
413 int result = 0;
414 unsigned int bus_index;
415
416 pr_debug(" -> %s:%d\n", __func__, __LINE__);
417
418 for (bus_index = 0; bus_index < 10; bus_index++) {
419 enum ps3_bus_type bus_type;
420 unsigned int bus_id;
421 unsigned int num_dev;
422
423 result = ps3_repository_read_bus_type(bus_index, &bus_type);
424
425 if (result) {
426 pr_debug("%s:%d read_bus_type(%u) failed\n",
427 __func__, __LINE__, bus_index);
428 break;
429 }
430
431 result = ps3_repository_read_bus_id(bus_index, &bus_id);
432
433 if (result) {
434 pr_debug("%s:%d read_bus_id(%u) failed\n",
435 __func__, __LINE__, bus_index);
436 continue;
437 }
438
439 if (bus_index != bus_id)
440 pr_debug("%s:%d bus_index != bus_id\n",
441 __func__, __LINE__);
442
443 result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
444
445 if (result) {
446 pr_debug("%s:%d read_bus_num_dev(%u) failed\n",
447 __func__, __LINE__, bus_index);
448 continue;
449 }
450
451 pr_debug("%s:%d bus_%u: bus_type %u, bus_id %u, num_dev %u\n",
452 __func__, __LINE__, bus_index, bus_type, bus_id,
453 num_dev);
454
455 dump_device_info(bus_index, num_dev);
456 }
457
458 pr_debug(" <- %s:%d\n", __func__, __LINE__);
459 return result;
460}
461#endif /* defined(DEBUG) */
462
463static int find_device(unsigned int bus_index, unsigned int num_dev,
464 unsigned int start_dev_index, enum ps3_dev_type dev_type,
465 struct ps3_repository_device *dev)
466{
467 int result = 0;
468 unsigned int dev_index;
469
470 pr_debug("%s:%d: find dev_type %u\n", __func__, __LINE__, dev_type);
471
472 dev->dev_index = UINT_MAX;
473
474 for (dev_index = start_dev_index; dev_index < num_dev; dev_index++) {
475 enum ps3_dev_type x;
476
477 result = ps3_repository_read_dev_type(bus_index, dev_index,
478 &x);
479
480 if (result) {
481 pr_debug("%s:%d read_dev_type failed\n",
482 __func__, __LINE__);
483 return result;
484 }
485
486 if (x == dev_type)
487 break;
488 }
489
490 BUG_ON(dev_index == num_dev);
491
492 pr_debug("%s:%d: found dev_type %u at dev_index %u\n",
493 __func__, __LINE__, dev_type, dev_index);
494
495 result = ps3_repository_read_dev_id(bus_index, dev_index,
496 &dev->did.dev_id);
497
498 if (result) {
499 pr_debug("%s:%d read_dev_id failed\n",
500 __func__, __LINE__);
501 return result;
502 }
503
504 dev->dev_index = dev_index;
505
506 pr_debug("%s:%d found: dev_id %u\n", __func__, __LINE__,
507 dev->did.dev_id);
508
509 return result;
510}
511
512int ps3_repository_find_device (enum ps3_bus_type bus_type,
513 enum ps3_dev_type dev_type,
514 const struct ps3_repository_device *start_dev,
515 struct ps3_repository_device *dev)
516{
517 int result = 0;
518 unsigned int bus_index;
519 unsigned int num_dev;
520
521 pr_debug("%s:%d: find bus_type %u, dev_type %u\n", __func__, __LINE__,
522 bus_type, dev_type);
523
524 dev->bus_index = UINT_MAX;
525
526 for (bus_index = start_dev ? start_dev->bus_index : 0; bus_index < 10;
527 bus_index++) {
528 enum ps3_bus_type x;
529
530 result = ps3_repository_read_bus_type(bus_index, &x);
531
532 if (result) {
533 pr_debug("%s:%d read_bus_type failed\n",
534 __func__, __LINE__);
535 return result;
536 }
537 if (x == bus_type)
538 break;
539 }
540
541 BUG_ON(bus_index == 10);
542
543 pr_debug("%s:%d: found bus_type %u at bus_index %u\n",
544 __func__, __LINE__, bus_type, bus_index);
545
546 result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
547
548 if (result) {
549 pr_debug("%s:%d read_bus_num_dev failed\n",
550 __func__, __LINE__);
551 return result;
552 }
553
554 result = find_device(bus_index, num_dev, start_dev
555 ? start_dev->dev_index + 1 : 0, dev_type, dev);
556
557 if (result) {
558		pr_debug("%s:%d find_device failed\n", __func__, __LINE__);
559 return result;
560 }
561
562 result = ps3_repository_read_bus_id(bus_index, &dev->did.bus_id);
563
564 if (result) {
565 pr_debug("%s:%d read_bus_id failed\n",
566 __func__, __LINE__);
567 return result;
568 }
569
570 dev->bus_index = bus_index;
571
572 pr_debug("%s:%d found: bus_id %u, dev_id %u\n",
573 __func__, __LINE__, dev->did.bus_id, dev->did.dev_id);
574
575 return result;
576}
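
/*
 * Hedged usage sketch (illustrative, not part of the original source):
 * enumerating every device of one type by feeding each match back in as
 * the start position.
 */
#if 0
static void example_find_all(enum ps3_bus_type bus_type,
	enum ps3_dev_type dev_type)
{
	struct ps3_repository_device dev;
	const struct ps3_repository_device *start = NULL;

	while (!ps3_repository_find_device(bus_type, dev_type, start, &dev)) {
		pr_debug("found bus_id %u, dev_id %u\n",
			dev.did.bus_id, dev.did.dev_id);
		start = &dev;
	}
}
#endif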
577
578int ps3_repository_find_interrupt(const struct ps3_repository_device *dev,
579 enum ps3_interrupt_type intr_type, unsigned int *interrupt_id)
580{
581 int result = 0;
582 unsigned int res_index;
583
584 pr_debug("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
585
586 *interrupt_id = UINT_MAX;
587
588 for (res_index = 0; res_index < 10; res_index++) {
589 enum ps3_interrupt_type t;
590 unsigned int id;
591
592 result = ps3_repository_read_dev_intr(dev->bus_index,
593 dev->dev_index, res_index, &t, &id);
594
595 if (result) {
596 pr_debug("%s:%d read_dev_intr failed\n",
597 __func__, __LINE__);
598 return result;
599 }
600
601 if (t == intr_type) {
602 *interrupt_id = id;
603 break;
604 }
605 }
606
607 BUG_ON(res_index == 10);
608
609 pr_debug("%s:%d: found intr_type %u at res_index %u\n",
610 __func__, __LINE__, intr_type, res_index);
611
612 return result;
613}
614
615int ps3_repository_find_region(const struct ps3_repository_device *dev,
616 enum ps3_region_type reg_type, u64 *bus_addr, u64 *len)
617{
618 int result = 0;
619 unsigned int res_index;
620
621 pr_debug("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
622
623 *bus_addr = *len = 0;
624
625 for (res_index = 0; res_index < 10; res_index++) {
626 enum ps3_region_type t;
627 u64 a;
628 u64 l;
629
630 result = ps3_repository_read_dev_reg(dev->bus_index,
631 dev->dev_index, res_index, &t, &a, &l);
632
633 if (result) {
634 pr_debug("%s:%d read_dev_reg failed\n",
635 __func__, __LINE__);
636 return result;
637 }
638
639 if (t == reg_type) {
640 *bus_addr = a;
641 *len = l;
642 break;
643 }
644 }
645
646 BUG_ON(res_index == 10);
647
648 pr_debug("%s:%d: found reg_type %u at res_index %u\n",
649 __func__, __LINE__, reg_type, res_index);
650
651 return result;
652}
653
654int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size)
655{
656 return read_node(PS3_LPAR_ID_CURRENT,
657 make_first_field("bi", 0),
658 make_field("pu", 0),
659 ppe_id,
660 make_field("rm_size", 0),
661 rm_size, 0);
662}
663
664int ps3_repository_read_region_total(u64 *region_total)
665{
666 return read_node(PS3_LPAR_ID_CURRENT,
667 make_first_field("bi", 0),
668 make_field("rgntotal", 0),
669 0, 0,
670 region_total, 0);
671}
672
673/**
674 * ps3_repository_read_mm_info - Read mm info for single pu system.
675 * @rm_base: Real mode memory base address.
676 * @rm_size: Real mode memory size.
677 * @region_total: Maximum memory region size.
678 */
679
680int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
681{
682 int result;
683 u64 ppe_id;
684
685 lv1_get_logical_ppe_id(&ppe_id);
686 *rm_base = 0;
687 result = ps3_repository_read_rm_size(ppe_id, rm_size);
688 return result ? result
689 : ps3_repository_read_region_total(region_total);
690}
691
692/**
693 * ps3_repository_read_num_spu_reserved - Number of physical spus reserved.
694 * @num_spu_reserved: Number of physical spus reserved.
695 */
696
697int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
698{
699 int result;
700 u64 v1;
701
702 result = read_node(PS3_LPAR_ID_CURRENT,
703 make_first_field("bi", 0),
704 make_field("spun", 0),
705 0, 0,
706 &v1, 0);
707 *num_spu_reserved = v1;
708 return result;
709}
710
711/**
712 * ps3_repository_read_num_spu_resource_id - Number of spu resource reservations.
713 * @num_resource_id: Number of spu resource ids.
714 */
715
716int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
717{
718 int result;
719 u64 v1;
720
721 result = read_node(PS3_LPAR_ID_CURRENT,
722 make_first_field("bi", 0),
723 make_field("spursvn", 0),
724 0, 0,
725 &v1, 0);
726 *num_resource_id = v1;
727 return result;
728}
729
730/**
731 * ps3_repository_read_spu_resource_id - spu resource reservation id value.
732 * @res_index: Resource reservation index.
733 * @resource_type: Resource reservation type.
734 * @resource_id: Resource reservation id.
735 */
736
737int ps3_repository_read_spu_resource_id(unsigned int res_index,
738 enum ps3_spu_resource_type* resource_type, unsigned int *resource_id)
739{
740 int result;
741 u64 v1;
742 u64 v2;
743
744 result = read_node(PS3_LPAR_ID_CURRENT,
745 make_first_field("bi", 0),
746 make_field("spursv", 0),
747 res_index,
748 0,
749 &v1, &v2);
750 *resource_type = v1;
751 *resource_id = v2;
752 return result;
753}
754
755int ps3_repository_read_boot_dat_address(u64 *address)
756{
757 return read_node(PS3_LPAR_ID_CURRENT,
758 make_first_field("bi", 0),
759 make_field("boot_dat", 0),
760 make_field("address", 0),
761 0,
762 address, 0);
763}
764
765int ps3_repository_read_boot_dat_size(unsigned int *size)
766{
767 int result;
768 u64 v1;
769
770 result = read_node(PS3_LPAR_ID_CURRENT,
771 make_first_field("bi", 0),
772 make_field("boot_dat", 0),
773 make_field("size", 0),
774 0,
775 &v1, 0);
776 *size = v1;
777 return result;
778}
779
780/**
781 * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area.
782 * @lpar_addr: lpar address of cell_ext_os_area
783 * @size: size of cell_ext_os_area
784 */
785
786int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
787{
788 int result;
789
790 *size = 0;
791 result = ps3_repository_read_boot_dat_address(lpar_addr);
792 return result ? result
793 : ps3_repository_read_boot_dat_size(size);
794}
795
796int ps3_repository_read_num_be(unsigned int *num_be)
797{
798 int result;
799 u64 v1;
800
801 result = read_node(PS3_LPAR_ID_PME,
802 make_first_field("ben", 0),
803 0,
804 0,
805 0,
806 &v1, 0);
807 *num_be = v1;
808 return result;
809}
810
811int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id)
812{
813 return read_node(PS3_LPAR_ID_PME,
814 make_first_field("be", be_index),
815 0,
816 0,
817 0,
818 node_id, 0);
819}
820
821int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
822{
823 return read_node(PS3_LPAR_ID_PME,
824 make_first_field("be", 0),
825 node_id,
826 make_field("clock", 0),
827 0,
828 tb_freq, 0);
829}
830
831int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
832{
833 int result;
834 u64 node_id;
835
836 *tb_freq = 0;
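	/* be_index is unused here; the node id of be 0 is always read. */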
837 result = ps3_repository_read_be_node_id(0, &node_id);
838 return result ? result
839 : ps3_repository_read_tb_freq(node_id, tb_freq);
840}
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
new file mode 100644
index 000000000000..d8b5cadbe80e
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -0,0 +1,173 @@
1/*
2 * PS3 platform setup routines.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/fs.h>
24#include <linux/root_dev.h>
25#include <linux/console.h>
26#include <linux/kexec.h>
27
28#include <asm/machdep.h>
29#include <asm/firmware.h>
30#include <asm/time.h>
31#include <asm/iommu.h>
32#include <asm/udbg.h>
33#include <asm/prom.h>
34#include <asm/lv1call.h>
35
36#include "platform.h"
37
38#if defined(DEBUG)
39#define DBG(fmt...) udbg_printf(fmt)
40#else
41#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
42#endif
43
44static void ps3_show_cpuinfo(struct seq_file *m)
45{
46 seq_printf(m, "machine\t\t: %s\n", ppc_md.name);
47}
48
49static void ps3_power_save(void)
50{
51 /*
52 * lv1_pause() puts the PPE thread into inactive state until an
53 * irq on an unmasked plug exists. MSR[EE] has no effect.
54 * flags: 0 = wake on DEC interrupt, 1 = ignore DEC interrupt.
55 */
56
57 lv1_pause(0);
58}
59
60static void ps3_panic(char *str)
61{
62 DBG("%s:%d %s\n", __func__, __LINE__, str);
63
64#ifdef CONFIG_SMP
65 smp_send_stop();
66#endif
67 printk("\n");
68 printk(" System does not reboot automatically.\n");
69 printk(" Please press POWER button.\n");
70 printk("\n");
71
72 for (;;) ;
73}
74
75static void __init ps3_setup_arch(void)
76{
77 DBG(" -> %s:%d\n", __func__, __LINE__);
78
79 ps3_spu_set_platform();
80 ps3_map_htab();
81
82#ifdef CONFIG_SMP
83 smp_init_ps3();
84#endif
85
86#ifdef CONFIG_DUMMY_CONSOLE
87 conswitchp = &dummy_con;
88#endif
89
90 ppc_md.power_save = ps3_power_save;
91
92 DBG(" <- %s:%d\n", __func__, __LINE__);
93}
94
95static void __init ps3_progress(char *s, unsigned short hex)
96{
97 printk("*** %04x : %s\n", hex, s ? s : "");
98}
99
100static int __init ps3_probe(void)
101{
102 unsigned long htab_size;
103 unsigned long dt_root;
104
105 DBG(" -> %s:%d\n", __func__, __LINE__);
106
107 dt_root = of_get_flat_dt_root();
108 if (!of_flat_dt_is_compatible(dt_root, "PS3"))
109 return 0;
110
111 powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
112
113 ps3_os_area_init();
114 ps3_mm_init();
115 ps3_mm_vas_create(&htab_size);
116 ps3_hpte_init(htab_size);
117
118 DBG(" <- %s:%d\n", __func__, __LINE__);
119 return 1;
120}
121
122#if defined(CONFIG_KEXEC)
123static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
124{
125 DBG(" -> %s:%d\n", __func__, __LINE__);
126
127 if (secondary) {
128 int cpu;
129 for_each_online_cpu(cpu)
130 if (cpu)
131 ps3_smp_cleanup_cpu(cpu);
132 } else
133 ps3_smp_cleanup_cpu(0);
134
135 DBG(" <- %s:%d\n", __func__, __LINE__);
136}
137
138static void ps3_machine_kexec(struct kimage *image)
139{
140 unsigned long ppe_id;
141
142 DBG(" -> %s:%d\n", __func__, __LINE__);
143
144 lv1_get_logical_ppe_id(&ppe_id);
145 lv1_configure_irq_state_bitmap(ppe_id, 0, 0);
146 ps3_mm_shutdown();
147 ps3_mm_vas_destroy();
148
149 default_machine_kexec(image);
150
151 DBG(" <- %s:%d\n", __func__, __LINE__);
152}
153#endif
154
155define_machine(ps3) {
156 .name = "PS3",
157 .probe = ps3_probe,
158 .setup_arch = ps3_setup_arch,
159 .show_cpuinfo = ps3_show_cpuinfo,
160 .init_IRQ = ps3_init_IRQ,
161 .panic = ps3_panic,
162 .get_boot_time = ps3_get_boot_time,
163 .set_rtc_time = ps3_set_rtc_time,
164 .get_rtc_time = ps3_get_rtc_time,
165 .calibrate_decr = ps3_calibrate_decr,
166 .progress = ps3_progress,
167#if defined(CONFIG_KEXEC)
168 .kexec_cpu_down = ps3_kexec_cpu_down,
169 .machine_kexec = ps3_machine_kexec,
170 .machine_kexec_prepare = default_machine_kexec_prepare,
171 .machine_crash_shutdown = default_machine_crash_shutdown,
172#endif
173};
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
new file mode 100644
index 000000000000..11d2080607ed
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -0,0 +1,158 @@
1/*
2 * PS3 SMP routines.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/smp.h>
23
24#include <asm/machdep.h>
25#include <asm/udbg.h>
26#include <asm/ps3.h>
27
28#include "platform.h"
29
30#if defined(DEBUG)
31#define DBG(fmt...) udbg_printf(fmt)
32#else
33#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
34#endif
35
36static irqreturn_t ipi_function_handler(int irq, void *msg)
37{
38 smp_message_recv((int)(long)msg);
39 return IRQ_HANDLED;
40}
41
42/**
43 * virqs - a per cpu array of virqs for ipi use
44 */
45
46#define MSG_COUNT 4
47static DEFINE_PER_CPU(unsigned int, virqs[MSG_COUNT]);
48
49static const char *names[MSG_COUNT] = {
50 "ipi call",
51 "ipi reschedule",
52 "ipi migrate",
53 "ipi debug brk"
54};
55
56static void do_message_pass(int target, int msg)
57{
58 int result;
59 unsigned int virq;
60
61 if (msg >= MSG_COUNT) {
62 DBG("%s:%d: bad msg: %d\n", __func__, __LINE__, msg);
63 return;
64 }
65
66 virq = per_cpu(virqs, target)[msg];
67 result = ps3_send_event_locally(virq);
68
69 if (result)
70 DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
71 " (%d)\n", __func__, __LINE__, target, msg, result);
72}
73
74static void ps3_smp_message_pass(int target, int msg)
75{
76 int cpu;
77
78 if (target < NR_CPUS)
79 do_message_pass(target, msg);
80 else if (target == MSG_ALL_BUT_SELF) {
81 for_each_online_cpu(cpu)
82 if (cpu != smp_processor_id())
83 do_message_pass(cpu, msg);
84 } else {
85 for_each_online_cpu(cpu)
86 do_message_pass(cpu, msg);
87 }
88}
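
/*
 * Illustrative note (not part of the original source): the generic powerpc
 * SMP code reaches this hook indirectly, e.g. smp_send_reschedule(cpu)
 * ends up as
 *
 *	ps3_smp_message_pass(cpu, PPC_MSG_RESCHEDULE);
 *
 * which fires the per-cpu event irq set up in ps3_smp_setup_cpu() below.
 */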
89
90static int ps3_smp_probe(void)
91{
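	/* The PS3's Cell PPE provides two hardware threads. */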
92 return 2;
93}
94
95static void __init ps3_smp_setup_cpu(int cpu)
96{
97 int result;
98 unsigned int *virqs = per_cpu(virqs, cpu);
99 int i;
100
101 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
102
103 /*
104 * Check assumptions on virqs[] indexing. If this
105 * check fails, then a different mapping of PPC_MSG_
106	 * to index needs to be set up.
107 */
108
109 BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
110 BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
111 BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
112
113 for (i = 0; i < MSG_COUNT; i++) {
114 result = ps3_alloc_event_irq(&virqs[i]);
115
116 if (result)
117 continue;
118
119 DBG("%s:%d: (%d, %d) => virq %u\n",
120 __func__, __LINE__, cpu, i, virqs[i]);
121
122
123 request_irq(virqs[i], ipi_function_handler, IRQF_DISABLED,
124 names[i], (void*)(long)i);
125 }
126
127 ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
128
129 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
130}
131
132void ps3_smp_cleanup_cpu(int cpu)
133{
134 unsigned int *virqs = per_cpu(virqs, cpu);
135 int i;
136
137 DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
138 for (i = 0; i < MSG_COUNT; i++) {
139 ps3_free_event_irq(virqs[i]);
140 free_irq(virqs[i], (void*)(long)i);
141 virqs[i] = NO_IRQ;
142 }
143 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
144}
145
146static struct smp_ops_t ps3_smp_ops = {
147 .probe = ps3_smp_probe,
148 .message_pass = ps3_smp_message_pass,
149 .kick_cpu = smp_generic_kick_cpu,
150 .setup_cpu = ps3_smp_setup_cpu,
151};
152
153void smp_init_ps3(void)
154{
155 DBG(" -> %s\n", __func__);
156 smp_ops = &ps3_smp_ops;
157 DBG(" <- %s\n", __func__);
158}
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
new file mode 100644
index 000000000000..644532c3b7c4
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -0,0 +1,613 @@
1/*
2 * PS3 Platform spu routines.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/mmzone.h>
24#include <linux/io.h>
25#include <linux/mm.h>
26
27#include <asm/spu.h>
28#include <asm/spu_priv1.h>
29#include <asm/ps3.h>
30#include <asm/lv1call.h>
31
32/* spu_management_ops */
33
34/**
35 * enum spe_type - Type of spe to create.
36 * @spe_type_logical: Standard logical spe.
37 *
38 * For use with lv1_construct_logical_spe(). The current HV does not support
39 * any types other than those listed.
40 */
41
42enum spe_type {
43 SPE_TYPE_LOGICAL = 0,
44};
45
46/**
47 * struct spe_shadow - logical spe shadow register area.
48 *
49 * Read-only shadow of spe registers.
50 */
51
52struct spe_shadow {
53 u8 padding_0000[0x0140];
54 u64 int_status_class0_RW; /* 0x0140 */
55 u64 int_status_class1_RW; /* 0x0148 */
56 u64 int_status_class2_RW; /* 0x0150 */
57 u8 padding_0158[0x0610-0x0158];
58 u64 mfc_dsisr_RW; /* 0x0610 */
59 u8 padding_0618[0x0620-0x0618];
60 u64 mfc_dar_RW; /* 0x0620 */
61 u8 padding_0628[0x0800-0x0628];
62 u64 mfc_dsipr_R; /* 0x0800 */
63 u8 padding_0808[0x0810-0x0808];
64 u64 mfc_lscrr_R; /* 0x0810 */
65 u8 padding_0818[0x0c00-0x0818];
66 u64 mfc_cer_R; /* 0x0c00 */
67 u8 padding_0c08[0x0f00-0x0c08];
68 u64 spe_execution_status; /* 0x0f00 */
69 u8 padding_0f08[0x1000-0x0f08];
70} __attribute__ ((packed));
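
/*
 * Hedged sketch (illustrative, not part of the original source): the
 * register offsets noted above could be verified at build time along
 * these lines.
 */
#if 0
static inline void spe_shadow_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct spe_shadow, int_status_class0_RW)
		!= 0x0140);
	BUILD_BUG_ON(offsetof(struct spe_shadow, mfc_dsisr_RW) != 0x0610);
	BUILD_BUG_ON(offsetof(struct spe_shadow, spe_execution_status)
		!= 0x0f00);
	BUILD_BUG_ON(sizeof(struct spe_shadow) != 0x1000);
}
#endif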
71
72
73/**
74 * enum spe_ex_state - Logical spe execution state.
75 * @spe_ex_state_unexecutable: Uninitialized.
76 * @spe_ex_state_executable: Enabled, not ready.
77 * @spe_ex_state_executed: Ready for use.
78 *
79 * The execution state (status) of the logical spe as reported in
80 * struct spe_shadow:spe_execution_status.
81 */
82
83enum spe_ex_state {
84 SPE_EX_STATE_UNEXECUTABLE = 0,
85 SPE_EX_STATE_EXECUTABLE = 2,
86 SPE_EX_STATE_EXECUTED = 3,
87};
88
89/**
90 * struct priv1_cache - Cached values of priv1 registers.
91 * @masks[]: Array of cached spe interrupt masks, indexed by class.
92 * @sr1: Cached mfc_sr1 register.
93 * @tclass_id: Cached mfc_tclass_id register.
94 */
95
96struct priv1_cache {
97 u64 masks[3];
98 u64 sr1;
99 u64 tclass_id;
100};
101
102/**
103 * struct spu_pdata - Platform state variables.
104 * @spe_id: HV spe id returned by lv1_construct_logical_spe().
105 * @resource_id: HV spe resource id returned by
106 * ps3_repository_read_spu_resource_id().
107 * @priv2_addr: lpar address of spe priv2 area returned by
108 * lv1_construct_logical_spe().
109 * @shadow_addr: lpar address of spe register shadow area returned by
110 * lv1_construct_logical_spe().
111 * @shadow: Virtual (ioremap) address of spe register shadow area.
112 * @cache: Cached values of priv1 registers.
113 */
114
115struct spu_pdata {
116 u64 spe_id;
117 u64 resource_id;
118 u64 priv2_addr;
119 u64 shadow_addr;
120 struct spe_shadow __iomem *shadow;
121 struct priv1_cache cache;
122};
123
124static struct spu_pdata *spu_pdata(struct spu *spu)
125{
126 return spu->pdata;
127}
128
129#define dump_areas(_a, _b, _c, _d, _e) \
130 _dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
131static void _dump_areas(unsigned int spe_id, unsigned long priv2,
132 unsigned long problem, unsigned long ls, unsigned long shadow,
133 const char* func, int line)
134{
135 pr_debug("%s:%d: spe_id: %xh (%u)\n", func, line, spe_id, spe_id);
136 pr_debug("%s:%d: priv2: %lxh\n", func, line, priv2);
137 pr_debug("%s:%d: problem: %lxh\n", func, line, problem);
138 pr_debug("%s:%d: ls: %lxh\n", func, line, ls);
139 pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
140}
141
142static unsigned long get_vas_id(void)
143{
144 unsigned long id;
145
146 lv1_get_logical_ppe_id(&id);
147 lv1_get_virtual_address_space_id_of_ppe(id, &id);
148
149 return id;
150}
151
152static int __init construct_spu(struct spu *spu)
153{
154 int result;
155 unsigned long unused;
156
157 result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
158 PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
159 &spu_pdata(spu)->priv2_addr, &spu->problem_phys,
160 &spu->local_store_phys, &unused,
161 &spu_pdata(spu)->shadow_addr,
162 &spu_pdata(spu)->spe_id);
163
164 if (result) {
165 pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
166 __func__, __LINE__, ps3_result(result));
167 return result;
168 }
169
170 return result;
171}
172
173static int __init add_spu_pages(unsigned long start_addr, unsigned long size)
174{
175 int result;
176 unsigned long start_pfn;
177 unsigned long nr_pages;
178 struct pglist_data *pgdata;
179 struct zone *zone;
180
181 BUG_ON(!mem_init_done);
182
183 start_pfn = start_addr >> PAGE_SHIFT;
184 nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
185
186 pgdata = NODE_DATA(0);
187 zone = pgdata->node_zones;
188
189 result = __add_pages(zone, start_pfn, nr_pages);
190
191 if (result)
192 pr_debug("%s:%d: __add_pages failed: (%d)\n",
193 __func__, __LINE__, result);
194
195 return result;
196}
197
198static void spu_unmap(struct spu *spu)
199{
200 iounmap(spu->priv2);
201 iounmap(spu->problem);
202 iounmap((__force u8 __iomem *)spu->local_store);
203 iounmap(spu_pdata(spu)->shadow);
204}
205
206static int __init setup_areas(struct spu *spu)
207{
208 struct table {char* name; unsigned long addr; unsigned long size;};
209 int result;
210
211 /* setup pages */
212
213 result = add_spu_pages(spu->local_store_phys, LS_SIZE);
214 if (result)
215 goto fail_add;
216
217 result = add_spu_pages(spu->problem_phys, sizeof(struct spu_problem));
218 if (result)
219 goto fail_add;
220
221 /* ioremap */
222
223 spu_pdata(spu)->shadow = __ioremap(
224 spu_pdata(spu)->shadow_addr, sizeof(struct spe_shadow),
225 PAGE_READONLY | _PAGE_NO_CACHE | _PAGE_GUARDED);
226 if (!spu_pdata(spu)->shadow) {
227 pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
228 goto fail_ioremap;
229 }
230
231 spu->local_store = ioremap(spu->local_store_phys, LS_SIZE);
232 if (!spu->local_store) {
233 pr_debug("%s:%d: ioremap local_store failed\n",
234 __func__, __LINE__);
235 goto fail_ioremap;
236 }
237
238 spu->problem = ioremap(spu->problem_phys,
239 sizeof(struct spu_problem));
240 if (!spu->problem) {
241 pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
242 goto fail_ioremap;
243 }
244
245 spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
246 sizeof(struct spu_priv2));
247 if (!spu->priv2) {
248 pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
249 goto fail_ioremap;
250 }
251
252 dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
253 spu->problem_phys, spu->local_store_phys,
254 spu_pdata(spu)->shadow_addr);
255 dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
256 (unsigned long)spu->problem, (unsigned long)spu->local_store,
257 (unsigned long)spu_pdata(spu)->shadow);
258
259 return 0;
260
261fail_ioremap:
262 spu_unmap(spu);
263fail_add:
264 return result;
265}
266
267static int __init setup_interrupts(struct spu *spu)
268{
269 int result;
270
271 result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 0,
272 &spu->irqs[0]);
273
274 if (result)
275 goto fail_alloc_0;
276
277 result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 1,
278 &spu->irqs[1]);
279
280 if (result)
281 goto fail_alloc_1;
282
283 result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 2,
284 &spu->irqs[2]);
285
286 if (result)
287 goto fail_alloc_2;
288
289 return result;
290
291fail_alloc_2:
292 ps3_free_spe_irq(spu->irqs[1]);
293fail_alloc_1:
294 ps3_free_spe_irq(spu->irqs[0]);
295fail_alloc_0:
296 spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
297 return result;
298}
299
300static int __init enable_spu(struct spu *spu)
301{
302 int result;
303
304 result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
305 spu_pdata(spu)->resource_id);
306
307 if (result) {
308 pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
309 __func__, __LINE__, ps3_result(result));
310 goto fail_enable;
311 }
312
313 result = setup_areas(spu);
314
315 if (result)
316 goto fail_areas;
317
318 result = setup_interrupts(spu);
319
320 if (result)
321 goto fail_interrupts;
322
323 return 0;
324
325fail_interrupts:
326 spu_unmap(spu);
327fail_areas:
328 lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
329fail_enable:
330 return result;
331}
332
333static int ps3_destroy_spu(struct spu *spu)
334{
335 int result;
336
337 pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
338
339 result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
340 BUG_ON(result);
341
342 ps3_free_spe_irq(spu->irqs[2]);
343 ps3_free_spe_irq(spu->irqs[1]);
344 ps3_free_spe_irq(spu->irqs[0]);
345
346 spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
347
348 spu_unmap(spu);
349
350 result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
351 BUG_ON(result);
352
353 kfree(spu->pdata);
354 spu->pdata = NULL;
355
356 return 0;
357}
358
359static int __init ps3_create_spu(struct spu *spu, void *data)
360{
361 int result;
362
363 pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
364
365 spu->pdata = kzalloc(sizeof(struct spu_pdata),
366 GFP_KERNEL);
367
368 if (!spu->pdata) {
369 result = -ENOMEM;
370 goto fail_malloc;
371 }
372
373 spu_pdata(spu)->resource_id = (unsigned long)data;
374
375 /* Init cached reg values to HV defaults. */
376
377 spu_pdata(spu)->cache.sr1 = 0x33;
378
379 result = construct_spu(spu);
380
381 if (result)
382 goto fail_construct;
383
384 /* For now, just go ahead and enable it. */
385
386 result = enable_spu(spu);
387
388 if (result)
389 goto fail_enable;
390
391 /* Make sure the spu is in SPE_EX_STATE_EXECUTED. */
392
393	/* FIXME: need something better than this unbounded busy-wait. */
394 while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
395 != SPE_EX_STATE_EXECUTED)
396 (void)0;
397
398 return result;
399
400fail_enable:
401fail_construct:
402 ps3_destroy_spu(spu);
403fail_malloc:
404 return result;
405}
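
The unbounded busy-wait at the end of ps3_create_spu() could be given a budget. A hedged sketch only, reusing this file's definitions (spu_pdata(), the shadow area, SPE_EX_STATE_EXECUTED) together with the stock jiffies/msleep helpers; the one-second budget is an arbitrary choice:

/* Hedged sketch: a bounded replacement for the poll above, assuming the
 * surrounding PS3 definitions plus <linux/jiffies.h> and <linux/delay.h>. */
static int __init wait_spe_executed(struct spu *spu)
{
	unsigned long timeout = jiffies + HZ;	/* arbitrary 1 s budget */

	while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
			!= SPE_EX_STATE_EXECUTED) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(1);
	}
	return 0;
}
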
406
407static int __init ps3_enumerate_spus(int (*fn)(void *data))
408{
409 int result;
410 unsigned int num_resource_id;
411 unsigned int i;
412
413 result = ps3_repository_read_num_spu_resource_id(&num_resource_id);
414
415 pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
416 num_resource_id);
417
418 /*
419 * For now, just create logical spus equal to the number
420 * of physical spus reserved for the partition.
421 */
422
423 for (i = 0; i < num_resource_id; i++) {
424 enum ps3_spu_resource_type resource_type;
425 unsigned int resource_id;
426
427 result = ps3_repository_read_spu_resource_id(i,
428 &resource_type, &resource_id);
429
430 if (result)
431 break;
432
433 if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
434 result = fn((void*)(unsigned long)resource_id);
435
436 if (result)
437 break;
438 }
439 }
440
441 if (result)
442 printk(KERN_WARNING "%s:%d: Error initializing spus\n",
443 __func__, __LINE__);
444
445 return result;
446}
447
448const struct spu_management_ops spu_management_ps3_ops = {
449 .enumerate_spus = ps3_enumerate_spus,
450 .create_spu = ps3_create_spu,
451 .destroy_spu = ps3_destroy_spu,
452};
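
The ops table lets generic SPU code stay platform-agnostic: callers go through function pointers that each platform aims at its own implementations (see ps3_spu_set_platform() below). A standalone sketch of the idiom; mgmt_ops, ps3_create, and ps3_destroy are invented names:

/* Standalone sketch of the ops-table idiom used by spu_management_ops. */
#include <stdio.h>

struct mgmt_ops {
	int (*create)(int id);
	int (*destroy)(int id);
};

static int ps3_create(int id)  { printf("create %d\n", id);  return 0; }
static int ps3_destroy(int id) { printf("destroy %d\n", id); return 0; }

static const struct mgmt_ops ps3_ops = {
	.create  = ps3_create,
	.destroy = ps3_destroy,
};

static const struct mgmt_ops *mgmt_ops;	/* set once at platform setup */

int main(void)
{
	mgmt_ops = &ps3_ops;
	mgmt_ops->create(0);
	mgmt_ops->destroy(0);
	return 0;
}
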
453
454/* spu_priv1_ops */
455
456static void int_mask_and(struct spu *spu, int class, u64 mask)
457{
458 u64 old_mask;
459
460	/* XXX: are these calls serialized by the caller? */
461 old_mask = spu_int_mask_get(spu, class);
462 spu_int_mask_set(spu, class, old_mask & mask);
463}
464
465static void int_mask_or(struct spu *spu, int class, u64 mask)
466{
467 u64 old_mask;
468
469 old_mask = spu_int_mask_get(spu, class);
470 spu_int_mask_set(spu, class, old_mask | mask);
471}
472
473static void int_mask_set(struct spu *spu, int class, u64 mask)
474{
475 spu_pdata(spu)->cache.masks[class] = mask;
476 lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
477 spu_pdata(spu)->cache.masks[class]);
478}
479
480static u64 int_mask_get(struct spu *spu, int class)
481{
482 return spu_pdata(spu)->cache.masks[class];
483}
484
485static void int_stat_clear(struct spu *spu, int class, u64 stat)
486{
487 /* Note that MFC_DSISR will be cleared when class1[MF] is set. */
488
489 lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
490 stat, 0);
491}
492
493static u64 int_stat_get(struct spu *spu, int class)
494{
495 u64 stat;
496
497 lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
498 return stat;
499}
500
501static void cpu_affinity_set(struct spu *spu, int cpu)
502{
503 /* No support. */
504}
505
506static u64 mfc_dar_get(struct spu *spu)
507{
508 return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
509}
510
511static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
512{
513 /* Nothing to do, cleared in int_stat_clear(). */
514}
515
516static u64 mfc_dsisr_get(struct spu *spu)
517{
518 return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
519}
520
521static void mfc_sdr_setup(struct spu *spu)
522{
523 /* Nothing to do. */
524}
525
526static void mfc_sr1_set(struct spu *spu, u64 sr1)
527{
528 /* Check bits allowed by HV. */
529
530 static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
531 | MFC_STATE1_PROBLEM_STATE_MASK);
532
533 BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
534
535 spu_pdata(spu)->cache.sr1 = sr1;
536 lv1_set_spe_privilege_state_area_1_register(
537 spu_pdata(spu)->spe_id,
538 offsetof(struct spu_priv1, mfc_sr1_RW),
539 spu_pdata(spu)->cache.sr1);
540}
541
542static u64 mfc_sr1_get(struct spu *spu)
543{
544 return spu_pdata(spu)->cache.sr1;
545}
546
547static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
548{
549 spu_pdata(spu)->cache.tclass_id = tclass_id;
550 lv1_set_spe_privilege_state_area_1_register(
551 spu_pdata(spu)->spe_id,
552 offsetof(struct spu_priv1, mfc_tclass_id_RW),
553 spu_pdata(spu)->cache.tclass_id);
554}
555
556static u64 mfc_tclass_id_get(struct spu *spu)
557{
558 return spu_pdata(spu)->cache.tclass_id;
559}
560
561static void tlb_invalidate(struct spu *spu)
562{
563 /* Nothing to do. */
564}
565
566static void resource_allocation_groupID_set(struct spu *spu, u64 id)
567{
568 /* No support. */
569}
570
571static u64 resource_allocation_groupID_get(struct spu *spu)
572{
573 return 0; /* No support. */
574}
575
576static void resource_allocation_enable_set(struct spu *spu, u64 enable)
577{
578 /* No support. */
579}
580
581static u64 resource_allocation_enable_get(struct spu *spu)
582{
583 return 0; /* No support. */
584}
585
586const struct spu_priv1_ops spu_priv1_ps3_ops = {
587 .int_mask_and = int_mask_and,
588 .int_mask_or = int_mask_or,
589 .int_mask_set = int_mask_set,
590 .int_mask_get = int_mask_get,
591 .int_stat_clear = int_stat_clear,
592 .int_stat_get = int_stat_get,
593 .cpu_affinity_set = cpu_affinity_set,
594 .mfc_dar_get = mfc_dar_get,
595 .mfc_dsisr_set = mfc_dsisr_set,
596 .mfc_dsisr_get = mfc_dsisr_get,
597 .mfc_sdr_setup = mfc_sdr_setup,
598 .mfc_sr1_set = mfc_sr1_set,
599 .mfc_sr1_get = mfc_sr1_get,
600 .mfc_tclass_id_set = mfc_tclass_id_set,
601 .mfc_tclass_id_get = mfc_tclass_id_get,
602 .tlb_invalidate = tlb_invalidate,
603 .resource_allocation_groupID_set = resource_allocation_groupID_set,
604 .resource_allocation_groupID_get = resource_allocation_groupID_get,
605 .resource_allocation_enable_set = resource_allocation_enable_set,
606 .resource_allocation_enable_get = resource_allocation_enable_get,
607};
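
Because the hypervisor owns the priv1 area, several of these ops keep a write-through shadow: set updates the cached copy and pushes it down with an lv1 call, while get is served entirely from the cache and never traps. A standalone sketch with a plain variable standing in for the hypervisor-held register:

/* Standalone sketch of the write-through register cache used above. */
#include <stdio.h>

static unsigned long long hv_reg;	/* stands in for the lv1 call target */
static unsigned long long cached_reg;

static void reg_set(unsigned long long val)
{
	cached_reg = val;
	hv_reg = cached_reg;		/* cf. lv1_set_spe_interrupt_mask() */
}

static unsigned long long reg_get(void)
{
	return cached_reg;		/* cache hit, no hypervisor call */
}

int main(void)
{
	reg_set(0x33);
	printf("reg = %#llx\n", reg_get());
	return 0;
}
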
608
609void ps3_spu_set_platform(void)
610{
611 spu_priv1_ops = &spu_priv1_ps3_ops;
612 spu_management_ops = &spu_management_ps3_ops;
613}
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
new file mode 100644
index 000000000000..1bae8b19b363
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -0,0 +1,104 @@
1/*
2 * PS3 time and rtc routines.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22
23#include <asm/rtc.h>
24#include <asm/lv1call.h>
25#include <asm/ps3.h>
26
27#include "platform.h"
28
29#define dump_tm(_a) _dump_tm(_a, __func__, __LINE__)
30static void _dump_tm(const struct rtc_time *tm, const char* func, int line)
31{
32 pr_debug("%s:%d tm_sec %d\n", func, line, tm->tm_sec);
33 pr_debug("%s:%d tm_min %d\n", func, line, tm->tm_min);
34 pr_debug("%s:%d tm_hour %d\n", func, line, tm->tm_hour);
35 pr_debug("%s:%d tm_mday %d\n", func, line, tm->tm_mday);
36 pr_debug("%s:%d tm_mon %d\n", func, line, tm->tm_mon);
37 pr_debug("%s:%d tm_year %d\n", func, line, tm->tm_year);
38 pr_debug("%s:%d tm_wday %d\n", func, line, tm->tm_wday);
39}
40
41#define dump_time(_a) _dump_time(_a, __func__, __LINE__)
42static void __attribute__ ((unused)) _dump_time(int time, const char* func,
43 int line)
44{
45 struct rtc_time tm;
46
47 to_tm(time, &tm);
48
49 pr_debug("%s:%d time %d\n", func, line, time);
50 _dump_tm(&tm, func, line);
51}
52
53/**
54 * rtc_shift - Difference in seconds between the 1970 epoch and the PS3 RTC value.
55 */
56
57static s64 rtc_shift;
58
59void __init ps3_calibrate_decr(void)
60{
61 int result;
62 u64 tmp;
63
64 result = ps3_repository_read_be_tb_freq(0, &tmp);
65 BUG_ON(result);
66
67 ppc_tb_freq = tmp;
68 ppc_proc_freq = ppc_tb_freq * 40;
69
70 rtc_shift = ps3_os_area_rtc_diff();
71}
72
73static u64 read_rtc(void)
74{
75 int result;
76 u64 rtc_val;
77 u64 tb_val;
78
79 result = lv1_get_rtc(&rtc_val, &tb_val);
80 BUG_ON(result);
81
82 return rtc_val;
83}
84
85int ps3_set_rtc_time(struct rtc_time *tm)
86{
87 u64 now = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
88 tm->tm_hour, tm->tm_min, tm->tm_sec);
89
90 rtc_shift = now - read_rtc();
91 return 0;
92}
93
94void ps3_get_rtc_time(struct rtc_time *tm)
95{
96 to_tm(read_rtc() + rtc_shift, tm);
97 tm->tm_year -= 1900;
98 tm->tm_mon -= 1;
99}
100
101unsigned long __init ps3_get_boot_time(void)
102{
103 return read_rtc() + rtc_shift;
104}
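
The rtc_shift bookkeeping deserves a worked example: the hardware counter never changes origin, so a read adds the shift back and setting the clock only rewrites the shift. A standalone demo with a pretend RTC value:

/* Standalone demo of the rtc_shift arithmetic above; rtc is a pretend
 * hardware counter with an arbitrary origin. */
#include <stdio.h>
#include <time.h>

static long long rtc = 1000;	/* pretend free-running RTC reading */
static long long rtc_shift;

static long long get_time(void)     { return rtc + rtc_shift; }
static void set_time(long long now) { rtc_shift = now - rtc; }

int main(void)
{
	set_time((long long)time(NULL));	/* "set the clock" */
	printf("wall time %lld\n", get_time());
	rtc += 60;				/* a minute passes */
	printf("a minute later %lld\n", get_time());
	return 0;
}
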
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 997243a91be8..69590fbf83da 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_XICS) += xics.o
10obj-$(CONFIG_SCANLOG) += scanlog.o 10obj-$(CONFIG_SCANLOG) += scanlog.o
11obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o 11obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o
12 12
13obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
14
13obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 15obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
14obj-$(CONFIG_HVCS) += hvcserver.o 16obj-$(CONFIG_HVCS) += hvcserver.o
15obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o 17obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 3c2d63ebf787..da6e5362e7cd 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -337,6 +337,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
337 printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n", 337 printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n",
338 pdn->eeh_check_count); 338 pdn->eeh_check_count);
339 dump_stack(); 339 dump_stack();
340 msleep(5000);
340 341
341 /* re-read the slot reset state */ 342 /* re-read the slot reset state */
342 if (read_slot_reset_state(pdn, rets) != 0) 343 if (read_slot_reset_state(pdn, rets) != 0)
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index b6b462d3c604..f2bae04424f8 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -153,7 +153,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
153 return piar; 153 return piar;
154 } 154 }
155 } 155 }
156 piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); 156 piar = kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
157 if (!piar) 157 if (!piar)
158 return NULL; 158 return NULL;
159 159
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index c2bc9904f1cb..cbd6b0711ab4 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -170,14 +170,19 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
170static void eeh_report_resume(struct pci_dev *dev, void *userdata) 170static void eeh_report_resume(struct pci_dev *dev, void *userdata)
171{ 171{
172 struct pci_driver *driver = dev->driver; 172 struct pci_driver *driver = dev->driver;
173 struct device_node *dn = pci_device_to_OF_node(dev);
173 174
174 dev->error_state = pci_channel_io_normal; 175 dev->error_state = pci_channel_io_normal;
175 176
176 if (!driver) 177 if (!driver)
177 return; 178 return;
178 if (!driver->err_handler) 179
179 return; 180 if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) {
180 if (!driver->err_handler->resume) 181 PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED;
182 enable_irq(dev->irq);
183 }
184 if (!driver->err_handler ||
185 !driver->err_handler->resume)
181 return; 186 return;
182 187
183 driver->err_handler->resume(dev); 188 driver->err_handler->resume(dev);
@@ -407,6 +412,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
407 412
408 if (rc) 413 if (rc)
409 result = PCI_ERS_RESULT_NEED_RESET; 414 result = PCI_ERS_RESULT_NEED_RESET;
415 else
416 result = PCI_ERS_RESULT_RECOVERED;
410 } 417 }
411 418
412 /* If any device has a hard failure, then shut off everything. */ 419 /* If any device has a hard failure, then shut off everything. */
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 137077451316..49037edf7d39 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
37/* EEH event workqueue setup. */ 37/* EEH event workqueue setup. */
38static DEFINE_SPINLOCK(eeh_eventlist_lock); 38static DEFINE_SPINLOCK(eeh_eventlist_lock);
39LIST_HEAD(eeh_eventlist); 39LIST_HEAD(eeh_eventlist);
40static void eeh_thread_launcher(void *); 40static void eeh_thread_launcher(struct work_struct *);
41DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); 41DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
42 42
43/* Serialize reset sequences for a given pci device */ 43/* Serialize reset sequences for a given pci device */
44DEFINE_MUTEX(eeh_event_mutex); 44DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
103 * eeh_thread_launcher 103 * eeh_thread_launcher
104 * @dummy - unused 104 * @dummy - unused
105 */ 105 */
106static void eeh_thread_launcher(void *dummy) 106static void eeh_thread_launcher(struct work_struct *dummy)
107{ 107{
108 if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) 108 if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
109 printk(KERN_ERR "Failed to start EEH daemon\n"); 109 printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
new file mode 100644
index 000000000000..f460b9cbfd46
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -0,0 +1,275 @@
1/*
2 * pseries CPU Hotplug infrastructure.
3 *
4 * Split out from arch/powerpc/platforms/pseries/setup.c
5 * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
6 *
7 * Peter Bergner, IBM March 2001.
8 * Copyright (C) 2001 IBM.
9 * Dave Engebretsen, Peter Bergner, and
10 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
11 * Plus various changes from other IBM teams...
12 *
13 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/cpu.h>
24#include <asm/system.h>
25#include <asm/prom.h>
26#include <asm/rtas.h>
27#include <asm/firmware.h>
28#include <asm/machdep.h>
29#include <asm/vdso_datapage.h>
30#include <asm/pSeries_reconfig.h>
31#include "xics.h"
32
33/* This version can't take the spinlock, because it never returns */
34static struct rtas_args rtas_stop_self_args = {
35 .token = RTAS_UNKNOWN_SERVICE,
36 .nargs = 0,
37 .nret = 1,
38 .rets = &rtas_stop_self_args.args[0],
39};
40
41static void rtas_stop_self(void)
42{
43 struct rtas_args *args = &rtas_stop_self_args;
44
45 local_irq_disable();
46
47 BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
48
49 printk("cpu %u (hwid %u) Ready to die...\n",
50 smp_processor_id(), hard_smp_processor_id());
51 enter_rtas(__pa(args));
52
53 panic("Alas, I survived.\n");
54}
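
The static, self-referential argument block above exists because stop-self never returns, so the normal spinlocked rtas_call() path is off limits; rets has to point back into the struct's own storage, which is only a constant expression for an object with static duration. A standalone sketch of that initializer shape (struct args here is an invented stand-in):

/* Standalone sketch of a self-referential static initializer: legal
 * because the address of a static object is an address constant. */
#include <stdio.h>

struct args {
	int nargs, nret;
	int data[4];
	int *rets;
};

static struct args stop_self_args = {
	.nargs = 0,
	.nret  = 1,
	.rets  = &stop_self_args.data[0],
};

int main(void)
{
	stop_self_args.data[0] = 42;	/* pretend RTAS wrote a result */
	printf("ret %d\n", *stop_self_args.rets);
	return 0;
}
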
55
56static void pseries_mach_cpu_die(void)
57{
58 local_irq_disable();
59 idle_task_exit();
60 xics_teardown_cpu(0);
61 rtas_stop_self();
62 /* Should never get here... */
63 BUG();
64 for(;;);
65}
66
67static int qcss_tok; /* query-cpu-stopped-state token */
68
69/* Get state of physical CPU.
70 * Return codes:
71 * 0 - The processor is in the RTAS stopped state
72 * 1 - stop-self is in progress
73 * 2 - The processor is not in the RTAS stopped state
74 * -1 - Hardware Error
75 * -2 - Hardware Busy, Try again later.
76 */
77static int query_cpu_stopped(unsigned int pcpu)
78{
79 int cpu_status, status;
80
81 status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
82 if (status != 0) {
83 printk(KERN_ERR
84 "RTAS query-cpu-stopped-state failed: %i\n", status);
85 return status;
86 }
87
88 return cpu_status;
89}
90
91static int pseries_cpu_disable(void)
92{
93 int cpu = smp_processor_id();
94
95 cpu_clear(cpu, cpu_online_map);
96 vdso_data->processorCount--;
97
98	/* fix boot_cpuid here */
99 if (cpu == boot_cpuid)
100 boot_cpuid = any_online_cpu(cpu_online_map);
101
102 /* FIXME: abstract this to not be platform specific later on */
103 xics_migrate_irqs_away();
104 return 0;
105}
106
107static void pseries_cpu_die(unsigned int cpu)
108{
109 int tries;
110 int cpu_status;
111 unsigned int pcpu = get_hard_smp_processor_id(cpu);
112
113 for (tries = 0; tries < 25; tries++) {
114 cpu_status = query_cpu_stopped(pcpu);
115 if (cpu_status == 0 || cpu_status == -1)
116 break;
117 msleep(200);
118 }
119 if (cpu_status != 0) {
120 printk("Querying DEAD? cpu %i (%i) shows %i\n",
121 cpu, pcpu, cpu_status);
122 }
123
124	/* Isolation and deallocation are definitely done by
125	 * drslot_chrp_cpu. If they were not, they would be
126 * done here. Change isolate state to Isolate and
127 * change allocation-state to Unusable.
128 */
129 paca[cpu].cpu_start = 0;
130}
131
132/*
133 * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
134 * here is that a cpu device node may represent up to two logical cpus
135 * in the SMT case. We must honor the assumption in other code that
136 * the logical ids for sibling SMT threads x and y are adjacent, such
137 * that x^1 == y and y^1 == x.
138 */
139static int pseries_add_processor(struct device_node *np)
140{
141 unsigned int cpu;
142 cpumask_t candidate_map, tmp = CPU_MASK_NONE;
143 int err = -ENOSPC, len, nthreads, i;
144 const u32 *intserv;
145
146 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
147 if (!intserv)
148 return 0;
149
150 nthreads = len / sizeof(u32);
151 for (i = 0; i < nthreads; i++)
152 cpu_set(i, tmp);
153
154 lock_cpu_hotplug();
155
156 BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
157
158 /* Get a bitmap of unoccupied slots. */
159 cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
160 if (cpus_empty(candidate_map)) {
161 /* If we get here, it most likely means that NR_CPUS is
162 * less than the partition's max processors setting.
163 */
164 printk(KERN_ERR "Cannot add cpu %s; this system configuration"
165 " supports %d logical cpus.\n", np->full_name,
166 cpus_weight(cpu_possible_map));
167 goto out_unlock;
168 }
169
170 while (!cpus_empty(tmp))
171 if (cpus_subset(tmp, candidate_map))
172 /* Found a range where we can insert the new cpu(s) */
173 break;
174 else
175 cpus_shift_left(tmp, tmp, nthreads);
176
177 if (cpus_empty(tmp)) {
178 printk(KERN_ERR "Unable to find space in cpu_present_map for"
179 " processor %s with %d thread(s)\n", np->name,
180 nthreads);
181 goto out_unlock;
182 }
183
184 for_each_cpu_mask(cpu, tmp) {
185 BUG_ON(cpu_isset(cpu, cpu_present_map));
186 cpu_set(cpu, cpu_present_map);
187 set_hard_smp_processor_id(cpu, *intserv++);
188 }
189 err = 0;
190out_unlock:
191 unlock_cpu_hotplug();
192 return err;
193}
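
The window search in pseries_add_processor() preserves the invariant stated above it, that SMT sibling threads get adjacent logical ids (x^1 == y): an nthreads-wide mask slides across the free slots in steps of nthreads until it fits. A standalone demo with plain bitmasks standing in for cpumask_t:

/* Standalone demo of the sliding-window slot search; bitmasks stand in
 * for cpumask_t and the values are invented. */
#include <stdio.h>

int main(void)
{
	unsigned int nthreads = 2;
	unsigned int possible = 0xff;			/* 8 possible cpus */
	unsigned int present  = 0x0f;			/* cpus 0-3 occupied */
	unsigned int candidate = possible ^ present;	/* cf. cpus_xor() */
	unsigned int tmp = (1u << nthreads) - 1;	/* window 0b11 */

	while (tmp && (tmp & candidate) != tmp)
		tmp <<= nthreads;	/* keep sibling ids adjacent */

	if (tmp)
		printf("insert the thread group at mask %#x\n", tmp);
	else
		printf("no room\n");
	return 0;
}
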
194
195/*
196 * Update the present map for a cpu node which is going away, and set
197 * the hard id in the paca(s) to -1 to be consistent with boot time
198 * convention for non-present cpus.
199 */
200static void pseries_remove_processor(struct device_node *np)
201{
202 unsigned int cpu;
203 int len, nthreads, i;
204 const u32 *intserv;
205
206 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
207 if (!intserv)
208 return;
209
210 nthreads = len / sizeof(u32);
211
212 lock_cpu_hotplug();
213 for (i = 0; i < nthreads; i++) {
214 for_each_present_cpu(cpu) {
215 if (get_hard_smp_processor_id(cpu) != intserv[i])
216 continue;
217 BUG_ON(cpu_online(cpu));
218 cpu_clear(cpu, cpu_present_map);
219 set_hard_smp_processor_id(cpu, -1);
220 break;
221 }
222 if (cpu == NR_CPUS)
223 printk(KERN_WARNING "Could not find cpu to remove "
224 "with physical id 0x%x\n", intserv[i]);
225 }
226 unlock_cpu_hotplug();
227}
228
229static int pseries_smp_notifier(struct notifier_block *nb,
230 unsigned long action, void *node)
231{
232 int err = NOTIFY_OK;
233
234 switch (action) {
235 case PSERIES_RECONFIG_ADD:
236 if (pseries_add_processor(node))
237 err = NOTIFY_BAD;
238 break;
239 case PSERIES_RECONFIG_REMOVE:
240 pseries_remove_processor(node);
241 break;
242 default:
243 err = NOTIFY_DONE;
244 break;
245 }
246 return err;
247}
248
249static struct notifier_block pseries_smp_nb = {
250 .notifier_call = pseries_smp_notifier,
251};
252
253static int __init pseries_cpu_hotplug_init(void)
254{
255 rtas_stop_self_args.token = rtas_token("stop-self");
256 qcss_tok = rtas_token("query-cpu-stopped-state");
257
258 if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
259 qcss_tok == RTAS_UNKNOWN_SERVICE) {
260 printk(KERN_INFO "CPU Hotplug not supported by firmware "
261 "- disabling.\n");
262 return 0;
263 }
264
265 ppc_md.cpu_die = pseries_mach_cpu_die;
266 smp_ops->cpu_disable = pseries_cpu_disable;
267 smp_ops->cpu_die = pseries_cpu_die;
268
269 /* Processors can be added/removed only on LPAR */
270 if (firmware_has_feature(FW_FEATURE_LPAR))
271 pSeries_reconfig_notifier_register(&pseries_smp_nb);
272
273 return 0;
274}
275arch_initcall(pseries_cpu_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index c00cfed7af2c..5c7e38789897 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -26,7 +26,7 @@
26BEGIN_FTR_SECTION; \ 26BEGIN_FTR_SECTION; \
27 mfspr r0,SPRN_PURR; /* get PURR and */ \ 27 mfspr r0,SPRN_PURR; /* get PURR and */ \
28 std r0,STK_PARM(r6)(r1); /* save for later */ \ 28 std r0,STK_PARM(r6)(r1); /* save for later */ \
29END_FTR_SECTION_IFCLR(CPU_FTR_PURR); 29END_FTR_SECTION_IFSET(CPU_FTR_PURR);
30 30
31/* 31/*
32 * postcall is performed immediately before function return which 32 * postcall is performed immediately before function return which
@@ -43,7 +43,7 @@ BEGIN_FTR_SECTION; \
43 mfspr r8,SPRN_PURR; /* PURR after */ \ 43 mfspr r8,SPRN_PURR; /* PURR after */ \
44 ld r6,STK_PARM(r6)(r1); /* PURR before */ \ 44 ld r6,STK_PARM(r6)(r1); /* PURR before */ \
45 subf r6,r6,r8; /* delta */ \ 45 subf r6,r6,r8; /* delta */ \
46END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ 46END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
47 ld r5,STK_PARM(r5)(r1); /* timebase before */ \ 47 ld r5,STK_PARM(r5)(r1); /* timebase before */ \
48 subf r5,r5,r7; /* time delta */ \ 48 subf r5,r5,r7; /* time delta */ \
49 \ 49 \
@@ -66,7 +66,7 @@ BEGIN_FTR_SECTION; \
66 ld r7,HCALL_STAT_PURR(r4); /* PURR */ \ 66 ld r7,HCALL_STAT_PURR(r4); /* PURR */ \
67 add r7,r7,r6; \ 67 add r7,r7,r6; \
68 std r7,HCALL_STAT_PURR(r4); \ 68 std r7,HCALL_STAT_PURR(r4); \
69END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ 69END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
701: 701:
71#else 71#else
72#define HCALL_INST_PRECALL 72#define HCALL_INST_PRECALL
@@ -145,6 +145,7 @@ _GLOBAL(plpar_hcall9)
145 145
146 HVSC /* invoke the hypervisor */ 146 HVSC /* invoke the hypervisor */
147 147
148 mr r0,r12
148 ld r12,STK_PARM(r4)(r1) 149 ld r12,STK_PARM(r4)(r1)
149 std r4, 0(r12) 150 std r4, 0(r12)
150 std r5, 8(r12) 151 std r5, 8(r12)
@@ -154,7 +155,7 @@ _GLOBAL(plpar_hcall9)
154 std r9, 40(r12) 155 std r9, 40(r12)
155 std r10,48(r12) 156 std r10,48(r12)
156 std r11,56(r12) 157 std r11,56(r12)
157 std r12,64(r12) 158 std r0, 64(r12)
158 159
159 HCALL_INST_POSTCALL 160 HCALL_INST_POSTCALL
160 161
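
The plpar_hcall9 hunk fixes a register clobber: after HVSC the ninth return value is still in r12, but r12 is then reloaded with the return-buffer pointer, so the old code stored that pointer into the buffer's own last slot; the fix parks the value in r0 first. A C analogy of the bug and the fix, not the real assembly:

/* C analogy only: "r12" is reused as the output pointer, so the result
 * it held must be parked elsewhere (r0) before the reload. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long retbuf[9] = { 0 };
	intptr_t r12 = 1234;		/* ninth hcall result lives here */
	intptr_t r0;

	r0 = r12;			/* the fix: mr r0,r12 before the reload */
	r12 = (intptr_t)retbuf;		/* r12 becomes the buffer pointer */
	((long *)r12)[8] = (long)r0;	/* old code stored r12 itself here */

	printf("retbuf[8] = %ld\n", retbuf[8]);
	return 0;
}
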
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 446e17d162a5..3ddc04925d50 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
34 */ 34 */
35static void *hc_start(struct seq_file *m, loff_t *pos) 35static void *hc_start(struct seq_file *m, loff_t *pos)
36{ 36{
37 if ((int)*pos < HCALL_STAT_ARRAY_SIZE) 37 if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1))
38 return (void *)(unsigned long)(*pos + 1); 38 return (void *)(unsigned long)(*pos + 1);
39 39
40 return NULL; 40 return NULL;
@@ -57,7 +57,7 @@ static int hc_show(struct seq_file *m, void *p)
57 struct hcall_stats *hs = (struct hcall_stats *)m->private; 57 struct hcall_stats *hs = (struct hcall_stats *)m->private;
58 58
59 if (hs[h_num].num_calls) { 59 if (hs[h_num].num_calls) {
60 if (!cpu_has_feature(CPU_FTR_PURR)) 60 if (cpu_has_feature(CPU_FTR_PURR))
61 seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2, 61 seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2,
62 hs[h_num].num_calls, 62 hs[h_num].num_calls,
63 hs[h_num].tb_total, 63 hs[h_num].tb_total,
@@ -85,7 +85,7 @@ static int hcall_inst_seq_open(struct inode *inode, struct file *file)
85 85
86 rc = seq_open(file, &hcall_inst_seq_ops); 86 rc = seq_open(file, &hcall_inst_seq_ops);
87 seq = file->private_data; 87 seq = file->private_data;
88 seq->private = file->f_dentry->d_inode->i_private; 88 seq->private = file->f_path.dentry->d_inode->i_private;
89 89
90 return rc; 90 return rc;
91} 91}
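
The hc_start() bound shrinks by one because the cursor handed to seq_file is biased: position 0 must not read back as the NULL end-of-sequence marker, so *pos + 1 is returned, and with the old bound the last biased cursor apparently ran one slot past the stats array. A standalone sketch of the biased-cursor shape (STAT_ARRAY_SIZE is an invented stand-in):

/* Standalone sketch of a seq_file-style biased cursor: NULL means stop,
 * so valid positions map to the non-NULL tokens 1..SIZE-1. */
#include <stdio.h>
#include <stdint.h>

#define STAT_ARRAY_SIZE 4

static void *start(long pos)
{
	if (pos < STAT_ARRAY_SIZE - 1)		/* was: pos < STAT_ARRAY_SIZE */
		return (void *)(intptr_t)(pos + 1);
	return NULL;
}

int main(void)
{
	long pos;
	void *p;

	for (pos = 0; (p = start(pos)) != NULL; pos++)
		printf("pos %ld -> cursor %ld\n", pos, (long)(intptr_t)p);
	return 0;
}
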
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 556c279a789d..3c95392f4f41 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -309,7 +309,7 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
309 tbl->it_size = size >> IOMMU_PAGE_SHIFT; 309 tbl->it_size = size >> IOMMU_PAGE_SHIFT;
310} 310}
311 311
312static void iommu_bus_setup_pSeries(struct pci_bus *bus) 312static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
313{ 313{
314 struct device_node *dn; 314 struct device_node *dn;
315 struct iommu_table *tbl; 315 struct iommu_table *tbl;
@@ -318,10 +318,9 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
318 struct pci_dn *pci; 318 struct pci_dn *pci;
319 int children; 319 int children;
320 320
321 DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);
322
323 dn = pci_bus_to_OF_node(bus); 321 dn = pci_bus_to_OF_node(bus);
324 pci = PCI_DN(dn); 322
323 DBG("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);
325 324
326 if (bus->self) { 325 if (bus->self) {
327 /* This is not a root bus, any setup will be done for the 326 /* This is not a root bus, any setup will be done for the
@@ -329,6 +328,7 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
329 */ 328 */
330 return; 329 return;
331 } 330 }
331 pci = PCI_DN(dn);
332 332
333 /* Check if the ISA bus on the system is under 333 /* Check if the ISA bus on the system is under
334 * this PHB. 334 * this PHB.
@@ -390,17 +390,17 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
390} 390}
391 391
392 392
393static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus) 393static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
394{ 394{
395 struct iommu_table *tbl; 395 struct iommu_table *tbl;
396 struct device_node *dn, *pdn; 396 struct device_node *dn, *pdn;
397 struct pci_dn *ppci; 397 struct pci_dn *ppci;
398 const void *dma_window = NULL; 398 const void *dma_window = NULL;
399 399
400 DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
401
402 dn = pci_bus_to_OF_node(bus); 400 dn = pci_bus_to_OF_node(bus);
403 401
402 DBG("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n", dn->full_name);
403
404 /* Find nearest ibm,dma-window, walking up the device tree */ 404 /* Find nearest ibm,dma-window, walking up the device tree */
405 for (pdn = dn; pdn != NULL; pdn = pdn->parent) { 405 for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
406 dma_window = get_property(pdn, "ibm,dma-window", NULL); 406 dma_window = get_property(pdn, "ibm,dma-window", NULL);
@@ -409,11 +409,15 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
409 } 409 }
410 410
411 if (dma_window == NULL) { 411 if (dma_window == NULL) {
412 DBG("iommu_bus_setup_pSeriesLP: bus %s seems to have no ibm,dma-window property\n", dn->full_name); 412 DBG(" no ibm,dma-window property !\n");
413 return; 413 return;
414 } 414 }
415 415
416 ppci = PCI_DN(pdn); 416 ppci = PCI_DN(pdn);
417
418 DBG(" parent is %s, iommu_table: 0x%p\n",
419 pdn->full_name, ppci->iommu_table);
420
417 if (!ppci->iommu_table) { 421 if (!ppci->iommu_table) {
418 /* Bussubno hasn't been copied yet. 422 /* Bussubno hasn't been copied yet.
419 * Do it now because iommu_table_setparms_lpar needs it. 423 * Do it now because iommu_table_setparms_lpar needs it.
@@ -427,6 +431,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
427 iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window); 431 iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
428 432
429 ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); 433 ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
434 DBG(" created table: %p\n", ppci->iommu_table);
430 } 435 }
431 436
432 if (pdn != dn) 437 if (pdn != dn)
@@ -434,27 +439,27 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
434} 439}
435 440
436 441
437static void iommu_dev_setup_pSeries(struct pci_dev *dev) 442static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
438{ 443{
439 struct device_node *dn, *mydn; 444 struct device_node *dn;
440 struct iommu_table *tbl; 445 struct iommu_table *tbl;
441 446
442 DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev)); 447 DBG("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
443 448
444 mydn = dn = pci_device_to_OF_node(dev); 449 dn = dev->dev.archdata.of_node;
445 450
446 /* If we're the direct child of a root bus, then we need to allocate 451 /* If we're the direct child of a root bus, then we need to allocate
447 * an iommu table ourselves. The bus setup code should have setup 452 * an iommu table ourselves. The bus setup code should have setup
448 * the window sizes already. 453 * the window sizes already.
449 */ 454 */
450 if (!dev->bus->self) { 455 if (!dev->bus->self) {
456 struct pci_controller *phb = PCI_DN(dn)->phb;
457
451 DBG(" --> first child, no bridge. Allocating iommu table.\n"); 458 DBG(" --> first child, no bridge. Allocating iommu table.\n");
452 tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL, 459 tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
453 PCI_DN(dn)->phb->node); 460 phb->node);
454 iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl); 461 iommu_table_setparms(phb, dn, tbl);
455 PCI_DN(dn)->iommu_table = iommu_init_table(tbl, 462 dev->dev.archdata.dma_data = iommu_init_table(tbl, phb->node);
456 PCI_DN(dn)->phb->node);
457
458 return; 463 return;
459 } 464 }
460 465
@@ -465,11 +470,11 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
465 while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL) 470 while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
466 dn = dn->parent; 471 dn = dn->parent;
467 472
468 if (dn && PCI_DN(dn)) { 473 if (dn && PCI_DN(dn))
469 PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table; 474 dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
470 } else { 475 else
471 DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev)); 476 printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
472 } 477 pci_name(dev));
473} 478}
474 479
475static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) 480static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
@@ -495,13 +500,15 @@ static struct notifier_block iommu_reconfig_nb = {
495 .notifier_call = iommu_reconfig_notifier, 500 .notifier_call = iommu_reconfig_notifier,
496}; 501};
497 502
498static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) 503static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
499{ 504{
500 struct device_node *pdn, *dn; 505 struct device_node *pdn, *dn;
501 struct iommu_table *tbl; 506 struct iommu_table *tbl;
502 const void *dma_window = NULL; 507 const void *dma_window = NULL;
503 struct pci_dn *pci; 508 struct pci_dn *pci;
504 509
510 DBG("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
511
505 /* dev setup for LPAR is a little tricky, since the device tree might 512 /* dev setup for LPAR is a little tricky, since the device tree might
506 * contain the dma-window properties per-device and not neccesarily 513 * contain the dma-window properties per-device and not neccesarily
507 * for the bus. So we need to search upwards in the tree until we 514 * for the bus. So we need to search upwards in the tree until we
@@ -509,9 +516,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
509 * already allocated. 516 * already allocated.
510 */ 517 */
511 dn = pci_device_to_OF_node(dev); 518 dn = pci_device_to_OF_node(dev);
512 519 DBG(" node is %s\n", dn->full_name);
513 DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n",
514 dev, pci_name(dev), dn->full_name);
515 520
516 for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; 521 for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
517 pdn = pdn->parent) { 522 pdn = pdn->parent) {
@@ -520,16 +525,17 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
520 break; 525 break;
521 } 526 }
522 527
528 DBG(" parent is %s\n", pdn->full_name);
529
523 /* Check for parent == NULL so we don't try to setup the empty EADS 530 /* Check for parent == NULL so we don't try to setup the empty EADS
524 * slots on POWER4 machines. 531 * slots on POWER4 machines.
525 */ 532 */
526 if (dma_window == NULL || pdn->parent == NULL) { 533 if (dma_window == NULL || pdn->parent == NULL) {
527 DBG("No dma window for device, linking to parent\n"); 534 DBG(" no dma window for device, linking to parent\n");
528 PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table; 535 dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
529 return; 536 return;
530 } else {
531 DBG("Found DMA window, allocating table\n");
532 } 537 }
538 DBG(" found DMA window, table: %p\n", pci->iommu_table);
533 539
534 pci = PCI_DN(pdn); 540 pci = PCI_DN(pdn);
535 if (!pci->iommu_table) { 541 if (!pci->iommu_table) {
@@ -542,24 +548,20 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
542 iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window); 548 iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
543 549
544 pci->iommu_table = iommu_init_table(tbl, pci->phb->node); 550 pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
551 DBG(" created table: %p\n", pci->iommu_table);
545 } 552 }
546 553
547 if (pdn != dn) 554 dev->dev.archdata.dma_data = pci->iommu_table;
548 PCI_DN(dn)->iommu_table = pci->iommu_table;
549} 555}
550 556
551static void iommu_bus_setup_null(struct pci_bus *b) { }
552static void iommu_dev_setup_null(struct pci_dev *d) { }
553
554/* These are called very early. */ 557/* These are called very early. */
555void iommu_init_early_pSeries(void) 558void iommu_init_early_pSeries(void)
556{ 559{
557 if (of_chosen && get_property(of_chosen, "linux,iommu-off", NULL)) { 560 if (of_chosen && get_property(of_chosen, "linux,iommu-off", NULL)) {
558 /* Direct I/O, IOMMU off */ 561 /* Direct I/O, IOMMU off */
559 ppc_md.iommu_dev_setup = iommu_dev_setup_null; 562 ppc_md.pci_dma_dev_setup = NULL;
560 ppc_md.iommu_bus_setup = iommu_bus_setup_null; 563 ppc_md.pci_dma_bus_setup = NULL;
561 pci_direct_iommu_init(); 564 pci_dma_ops = &dma_direct_ops;
562
563 return; 565 return;
564 } 566 }
565 567
@@ -572,19 +574,19 @@ void iommu_init_early_pSeries(void)
572 ppc_md.tce_free = tce_free_pSeriesLP; 574 ppc_md.tce_free = tce_free_pSeriesLP;
573 } 575 }
574 ppc_md.tce_get = tce_get_pSeriesLP; 576 ppc_md.tce_get = tce_get_pSeriesLP;
575 ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP; 577 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
576 ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP; 578 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
577 } else { 579 } else {
578 ppc_md.tce_build = tce_build_pSeries; 580 ppc_md.tce_build = tce_build_pSeries;
579 ppc_md.tce_free = tce_free_pSeries; 581 ppc_md.tce_free = tce_free_pSeries;
580 ppc_md.tce_get = tce_get_pseries; 582 ppc_md.tce_get = tce_get_pseries;
581 ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries; 583 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
582 ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries; 584 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
583 } 585 }
584 586
585 587
586 pSeries_reconfig_notifier_register(&iommu_reconfig_nb); 588 pSeries_reconfig_notifier_register(&iommu_reconfig_nb);
587 589
588 pci_iommu_init(); 590 pci_dma_ops = &dma_iommu_ops;
589} 591}
590 592
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 1820a0b0a8c6..721436db3ef0 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -282,7 +282,7 @@ void vpa_init(int cpu)
282 } 282 }
283} 283}
284 284
285long pSeries_lpar_hpte_insert(unsigned long hpte_group, 285static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
286 unsigned long va, unsigned long pa, 286 unsigned long va, unsigned long pa,
287 unsigned long rflags, unsigned long vflags, 287 unsigned long rflags, unsigned long vflags,
288 int psize) 288 int psize)
@@ -506,7 +506,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
506 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie 506 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
507 * lock. 507 * lock.
508 */ 508 */
509void pSeries_lpar_flush_hash_range(unsigned long number, int local) 509static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
510{ 510{
511 int i; 511 int i;
512 unsigned long flags = 0; 512 unsigned long flags = 0;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 410a6bcc4ca0..715db5c89908 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -29,8 +29,6 @@
29#include <asm/prom.h> 29#include <asm/prom.h>
30#include <asm/ppc-pci.h> 30#include <asm/ppc-pci.h>
31 31
32static int __devinitdata s7a_workaround = -1;
33
34#if 0 32#if 0
35void pcibios_name_device(struct pci_dev *dev) 33void pcibios_name_device(struct pci_dev *dev)
36{ 34{
@@ -57,39 +55,6 @@ void pcibios_name_device(struct pci_dev *dev)
57DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); 55DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
58#endif 56#endif
59 57
60static void __devinit check_s7a(void)
61{
62 struct device_node *root;
63 const char *model;
64
65 s7a_workaround = 0;
66 root = of_find_node_by_path("/");
67 if (root) {
68 model = get_property(root, "model", NULL);
69 if (model && !strcmp(model, "IBM,7013-S7A"))
70 s7a_workaround = 1;
71 of_node_put(root);
72 }
73}
74
75void __devinit pSeries_irq_bus_setup(struct pci_bus *bus)
76{
77 struct pci_dev *dev;
78
79 if (s7a_workaround < 0)
80 check_s7a();
81 list_for_each_entry(dev, &bus->devices, bus_list) {
82 pci_read_irq_line(dev);
83 if (s7a_workaround) {
84 if (dev->irq > 16) {
85 dev->irq -= 3;
86 pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
87 dev->irq);
88 }
89 }
90 }
91}
92
93static void __init pSeries_request_regions(void) 58static void __init pSeries_request_regions(void)
94{ 59{
95 if (!isa_io_base) 60 if (!isa_io_base)
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 6bfacc217085..ac56b868913a 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -93,8 +93,8 @@ pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
93 if (list_empty(&dev->global_list)) { 93 if (list_empty(&dev->global_list)) {
94 int i; 94 int i;
95 95
96 /* Need to setup IOMMU tables */ 96 /* Fill device archdata and setup iommu table */
97 ppc_md.iommu_dev_setup(dev); 97 pcibios_setup_new_device(dev);
98 98
99 if(fix_bus) 99 if(fix_bus)
100 pcibios_fixup_device_resources(dev, bus); 100 pcibios_fixup_device_resources(dev, bus);
@@ -195,7 +195,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
195 phb = pcibios_alloc_controller(dn); 195 phb = pcibios_alloc_controller(dn);
196 if (!phb) 196 if (!phb)
197 return NULL; 197 return NULL;
198 setup_phb(dn, phb); 198 rtas_setup_phb(phb);
199 pci_process_bridge_OF_ranges(phb, dn, 0); 199 pci_process_bridge_OF_ranges(phb, dn, 0);
200 200
201 pci_setup_phb_io_dynamic(phb, primary); 201 pci_setup_phb_io_dynamic(phb, primary);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 1773103354be..4ad33e41b008 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -268,11 +268,10 @@ static char * parse_next_property(char *buf, char *end, char **name, int *length
268static struct property *new_property(const char *name, const int length, 268static struct property *new_property(const char *name, const int length,
269 const unsigned char *value, struct property *last) 269 const unsigned char *value, struct property *last)
270{ 270{
271 struct property *new = kmalloc(sizeof(*new), GFP_KERNEL); 271 struct property *new = kzalloc(sizeof(*new), GFP_KERNEL);
272 272
273 if (!new) 273 if (!new)
274 return NULL; 274 return NULL;
275 memset(new, 0, sizeof(*new));
276 275
277 if (!(new->name = kmalloc(strlen(name) + 1, GFP_KERNEL))) 276 if (!(new->name = kmalloc(strlen(name) + 1, GFP_KERNEL)))
278 goto cleanup; 277 goto cleanup;
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 77a5bb1d9c30..45368a57d7dd 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -47,7 +47,7 @@ static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */
47static ssize_t scanlog_read(struct file *file, char __user *buf, 47static ssize_t scanlog_read(struct file *file, char __user *buf,
48 size_t count, loff_t *ppos) 48 size_t count, loff_t *ppos)
49{ 49{
50 struct inode * inode = file->f_dentry->d_inode; 50 struct inode * inode = file->f_path.dentry->d_inode;
51 struct proc_dir_entry *dp; 51 struct proc_dir_entry *dp;
52 unsigned int *data; 52 unsigned int *data;
53 int status; 53 int status;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 89a8119f988d..042ecae107ac 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -347,17 +347,6 @@ static int __init pSeries_init_panel(void)
347} 347}
348arch_initcall(pSeries_init_panel); 348arch_initcall(pSeries_init_panel);
349 349
350static void pSeries_mach_cpu_die(void)
351{
352 local_irq_disable();
353 idle_task_exit();
354 xics_teardown_cpu(0);
355 rtas_stop_self();
356 /* Should never get here... */
357 BUG();
358 for(;;);
359}
360
361static int pseries_set_dabr(unsigned long dabr) 350static int pseries_set_dabr(unsigned long dabr)
362{ 351{
363 return plpar_hcall_norets(H_SET_DABR, dabr); 352 return plpar_hcall_norets(H_SET_DABR, dabr);
@@ -433,19 +422,14 @@ static int __init pSeries_probe_hypertas(unsigned long node,
433 if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL) 422 if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
434 powerpc_firmware_features |= FW_FEATURE_LPAR; 423 powerpc_firmware_features |= FW_FEATURE_LPAR;
435 424
436 if (firmware_has_feature(FW_FEATURE_LPAR))
437 hpte_init_lpar();
438 else
439 hpte_init_native();
440
441 return 1; 425 return 1;
442} 426}
443 427
444static int __init pSeries_probe(void) 428static int __init pSeries_probe(void)
445{ 429{
446 unsigned long root = of_get_flat_dt_root(); 430 unsigned long root = of_get_flat_dt_root();
447 char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(), 431 char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
448 "device_type", NULL); 432
449 if (dtype == NULL) 433 if (dtype == NULL)
450 return 0; 434 return 0;
451 if (strcmp(dtype, "chrp")) 435 if (strcmp(dtype, "chrp"))
@@ -463,6 +447,11 @@ static int __init pSeries_probe(void)
463 /* Now try to figure out if we are running on LPAR */ 447 /* Now try to figure out if we are running on LPAR */
464 of_scan_flat_dt(pSeries_probe_hypertas, NULL); 448 of_scan_flat_dt(pSeries_probe_hypertas, NULL);
465 449
450 if (firmware_has_feature(FW_FEATURE_LPAR))
451 hpte_init_lpar();
452 else
453 hpte_init_native();
454
466 DBG("Machine is%s LPAR !\n", 455 DBG("Machine is%s LPAR !\n",
467 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not"); 456 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
468 457
@@ -553,12 +542,10 @@ define_machine(pseries) {
553 .log_error = pSeries_log_error, 542 .log_error = pSeries_log_error,
554 .pcibios_fixup = pSeries_final_fixup, 543 .pcibios_fixup = pSeries_final_fixup,
555 .pci_probe_mode = pSeries_pci_probe_mode, 544 .pci_probe_mode = pSeries_pci_probe_mode,
556 .irq_bus_setup = pSeries_irq_bus_setup,
557 .restart = rtas_restart, 545 .restart = rtas_restart,
558 .power_off = rtas_power_off, 546 .power_off = rtas_power_off,
559 .halt = rtas_halt, 547 .halt = rtas_halt,
560 .panic = rtas_os_term, 548 .panic = rtas_os_term,
561 .cpu_die = pSeries_mach_cpu_die,
562 .get_boot_time = rtas_get_boot_time, 549 .get_boot_time = rtas_get_boot_time,
563 .get_rtc_time = rtas_get_rtc_time, 550 .get_rtc_time = rtas_get_rtc_time,
564 .set_rtc_time = rtas_set_rtc_time, 551 .set_rtc_time = rtas_set_rtc_time,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c6624b8a0e77..4408518eaebe 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -64,197 +64,6 @@ static cpumask_t of_spin_map;
64 64
65extern void generic_secondary_smp_init(unsigned long); 65extern void generic_secondary_smp_init(unsigned long);
66 66
67#ifdef CONFIG_HOTPLUG_CPU
68
69/* Get state of physical CPU.
70 * Return codes:
71 * 0 - The processor is in the RTAS stopped state
72 * 1 - stop-self is in progress
73 * 2 - The processor is not in the RTAS stopped state
74 * -1 - Hardware Error
75 * -2 - Hardware Busy, Try again later.
76 */
77static int query_cpu_stopped(unsigned int pcpu)
78{
79 int cpu_status;
80 int status, qcss_tok;
81
82 qcss_tok = rtas_token("query-cpu-stopped-state");
83 if (qcss_tok == RTAS_UNKNOWN_SERVICE)
84 return -1;
85 status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
86 if (status != 0) {
87 printk(KERN_ERR
88 "RTAS query-cpu-stopped-state failed: %i\n", status);
89 return status;
90 }
91
92 return cpu_status;
93}
94
95static int pSeries_cpu_disable(void)
96{
97 int cpu = smp_processor_id();
98
99 cpu_clear(cpu, cpu_online_map);
100 vdso_data->processorCount--;
101
102 /*fix boot_cpuid here*/
103 if (cpu == boot_cpuid)
104 boot_cpuid = any_online_cpu(cpu_online_map);
105
106 /* FIXME: abstract this to not be platform specific later on */
107 xics_migrate_irqs_away();
108 return 0;
109}
110
111static void pSeries_cpu_die(unsigned int cpu)
112{
113 int tries;
114 int cpu_status;
115 unsigned int pcpu = get_hard_smp_processor_id(cpu);
116
117 for (tries = 0; tries < 25; tries++) {
118 cpu_status = query_cpu_stopped(pcpu);
119 if (cpu_status == 0 || cpu_status == -1)
120 break;
121 msleep(200);
122 }
123 if (cpu_status != 0) {
124 printk("Querying DEAD? cpu %i (%i) shows %i\n",
125 cpu, pcpu, cpu_status);
126 }
127
128 /* Isolation and deallocation are definatly done by
129 * drslot_chrp_cpu. If they were not they would be
130 * done here. Change isolate state to Isolate and
131 * change allocation-state to Unusable.
132 */
133 paca[cpu].cpu_start = 0;
134}
135
136/*
137 * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
138 * here is that a cpu device node may represent up to two logical cpus
139 * in the SMT case. We must honor the assumption in other code that
140 * the logical ids for sibling SMT threads x and y are adjacent, such
141 * that x^1 == y and y^1 == x.
142 */
143static int pSeries_add_processor(struct device_node *np)
144{
145 unsigned int cpu;
146 cpumask_t candidate_map, tmp = CPU_MASK_NONE;
147 int err = -ENOSPC, len, nthreads, i;
148 const u32 *intserv;
149
150 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
151 if (!intserv)
152 return 0;
153
154 nthreads = len / sizeof(u32);
155 for (i = 0; i < nthreads; i++)
156 cpu_set(i, tmp);
157
158 lock_cpu_hotplug();
159
160 BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
161
162 /* Get a bitmap of unoccupied slots. */
163 cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
164 if (cpus_empty(candidate_map)) {
165 /* If we get here, it most likely means that NR_CPUS is
166 * less than the partition's max processors setting.
167 */
168 printk(KERN_ERR "Cannot add cpu %s; this system configuration"
169 " supports %d logical cpus.\n", np->full_name,
170 cpus_weight(cpu_possible_map));
171 goto out_unlock;
172 }
173
174 while (!cpus_empty(tmp))
175 if (cpus_subset(tmp, candidate_map))
176 /* Found a range where we can insert the new cpu(s) */
177 break;
178 else
179 cpus_shift_left(tmp, tmp, nthreads);
180
181 if (cpus_empty(tmp)) {
182 printk(KERN_ERR "Unable to find space in cpu_present_map for"
183 " processor %s with %d thread(s)\n", np->name,
184 nthreads);
185 goto out_unlock;
186 }
187
188 for_each_cpu_mask(cpu, tmp) {
189 BUG_ON(cpu_isset(cpu, cpu_present_map));
190 cpu_set(cpu, cpu_present_map);
191 set_hard_smp_processor_id(cpu, *intserv++);
192 }
193 err = 0;
194out_unlock:
195 unlock_cpu_hotplug();
196 return err;
197}
198
199/*
200 * Update the present map for a cpu node which is going away, and set
201 * the hard id in the paca(s) to -1 to be consistent with boot time
202 * convention for non-present cpus.
203 */
204static void pSeries_remove_processor(struct device_node *np)
205{
206 unsigned int cpu;
207 int len, nthreads, i;
208 const u32 *intserv;
209
210 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
211 if (!intserv)
212 return;
213
214 nthreads = len / sizeof(u32);
215
216 lock_cpu_hotplug();
217 for (i = 0; i < nthreads; i++) {
218 for_each_present_cpu(cpu) {
219 if (get_hard_smp_processor_id(cpu) != intserv[i])
220 continue;
221 BUG_ON(cpu_online(cpu));
222 cpu_clear(cpu, cpu_present_map);
223 set_hard_smp_processor_id(cpu, -1);
224 break;
225 }
226 if (cpu == NR_CPUS)
227 printk(KERN_WARNING "Could not find cpu to remove "
228 "with physical id 0x%x\n", intserv[i]);
229 }
230 unlock_cpu_hotplug();
231}
232
233static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *node)
234{
235 int err = NOTIFY_OK;
236
237 switch (action) {
238 case PSERIES_RECONFIG_ADD:
239 if (pSeries_add_processor(node))
240 err = NOTIFY_BAD;
241 break;
242 case PSERIES_RECONFIG_REMOVE:
243 pSeries_remove_processor(node);
244 break;
245 default:
246 err = NOTIFY_DONE;
247 break;
248 }
249 return err;
250}
251
252static struct notifier_block pSeries_smp_nb = {
253 .notifier_call = pSeries_smp_notifier,
254};
255
256#endif /* CONFIG_HOTPLUG_CPU */
257
258/** 67/**
259 * smp_startup_cpu() - start the given cpu 68 * smp_startup_cpu() - start the given cpu
260 * 69 *
@@ -422,15 +231,6 @@ static void __init smp_init_pseries(void)
422 231
423 DBG(" -> smp_init_pSeries()\n"); 232 DBG(" -> smp_init_pSeries()\n");
424 233
425#ifdef CONFIG_HOTPLUG_CPU
426 smp_ops->cpu_disable = pSeries_cpu_disable;
427 smp_ops->cpu_die = pSeries_cpu_die;
428
429 /* Processors can be added/removed only on LPAR */
430 if (firmware_has_feature(FW_FEATURE_LPAR))
431 pSeries_reconfig_notifier_register(&pSeries_smp_nb);
432#endif
433
434 /* Mark threads which are still spinning in hold loops. */ 234 /* Mark threads which are still spinning in hold loops. */
435 if (cpu_has_feature(CPU_FTR_SMT)) { 235 if (cpu_has_feature(CPU_FTR_SMT)) {
436 for_each_present_cpu(i) { 236 for_each_present_cpu(i) {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index d071abe78ab1..81d172d65038 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -224,7 +224,6 @@ static void xics_unmask_irq(unsigned int virq)
224static void xics_mask_real_irq(unsigned int irq) 224static void xics_mask_real_irq(unsigned int irq)
225{ 225{
226 int call_status; 226 int call_status;
227 unsigned int server;
228 227
229 if (irq == XICS_IPI) 228 if (irq == XICS_IPI)
230 return; 229 return;
@@ -236,9 +235,9 @@ static void xics_mask_real_irq(unsigned int irq)
236 return; 235 return;
237 } 236 }
238 237
239 server = get_irq_server(irq);
240 /* Have to set XIVE to 0xff to be able to remove a slot */ 238 /* Have to set XIVE to 0xff to be able to remove a slot */
241 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff); 239 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
240 default_server, 0xff);
242 if (call_status != 0) { 241 if (call_status != 0) {
243 printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)" 242 printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
244 " returned %d\n", irq, call_status); 243 " returned %d\n", irq, call_status);
@@ -656,13 +655,38 @@ static void __init xics_setup_8259_cascade(void)
656 set_irq_chained_handler(cascade, pseries_8259_cascade); 655 set_irq_chained_handler(cascade, pseries_8259_cascade);
657} 656}
658 657
658static struct device_node *cpuid_to_of_node(int cpu)
659{
660 struct device_node *np;
661 u32 hcpuid = get_hard_smp_processor_id(cpu);
662
663 for_each_node_by_type(np, "cpu") {
664 int i, len;
665 const u32 *intserv;
666
667 intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
668
669 if (!intserv)
670 intserv = get_property(np, "reg", &len);
671
672 i = len / sizeof(u32);
673
674 while (i--)
675 if (intserv[i] == hcpuid)
676 return np;
677 }
678
679 return NULL;
680}
681
659void __init xics_init_IRQ(void) 682void __init xics_init_IRQ(void)
660{ 683{
661 int i; 684 int i, j;
662 struct device_node *np; 685 struct device_node *np;
663 u32 ilen, indx = 0; 686 u32 ilen, indx = 0;
664 const u32 *ireg; 687 const u32 *ireg, *isize;
665 int found = 0; 688 int found = 0;
689 u32 hcpuid;
666 690
667 ppc64_boot_msg(0x20, "XICS Init"); 691 ppc64_boot_msg(0x20, "XICS Init");
668 692
@@ -683,26 +707,31 @@ void __init xics_init_IRQ(void)
683 xics_init_host(); 707 xics_init_host();
684 708
685 /* Find the server numbers for the boot cpu. */ 709 /* Find the server numbers for the boot cpu. */
686 for (np = of_find_node_by_type(NULL, "cpu"); 710 np = cpuid_to_of_node(boot_cpuid);
687 np; 711 BUG_ON(!np);
688 np = of_find_node_by_type(np, "cpu")) { 712 ireg = get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
689 ireg = get_property(np, "reg", &ilen); 713 if (!ireg)
690 if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { 714 goto skip_gserver_check;
691 ireg = get_property(np, 715 i = ilen / sizeof(int);
692 "ibm,ppc-interrupt-gserver#s", &ilen); 716 hcpuid = get_hard_smp_processor_id(boot_cpuid);
693 i = ilen / sizeof(int); 717
694 if (ireg && i > 0) { 718 /* Global interrupt distribution server is specified in the last
695 default_server = ireg[0]; 719 * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
696	/* take last element */ 720	 * entry from this property for the current boot cpu id and use it as
697 default_distrib_server = ireg[i-1]; 721 * default distribution server
698 } 722 */
699 ireg = get_property(np, 723 for (j = 0; j < i; j += 2) {
724 if (ireg[j] == hcpuid) {
725 default_server = hcpuid;
726 default_distrib_server = ireg[j+1];
727
728 isize = get_property(np,
700 "ibm,interrupt-server#-size", NULL); 729 "ibm,interrupt-server#-size", NULL);
701 if (ireg) 730 if (isize)
702 interrupt_server_size = *ireg; 731 interrupt_server_size = *isize;
703 break;
704 } 732 }
705 } 733 }
734skip_gserver_check:
706 of_node_put(np); 735 of_node_put(np);
707 736
708 if (firmware_has_feature(FW_FEATURE_LPAR)) 737 if (firmware_has_feature(FW_FEATURE_LPAR))