Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--   arch/powerpc/platforms/cell/Makefile        2
-rw-r--r--   arch/powerpc/platforms/cell/interrupt.c   284
-rw-r--r--   arch/powerpc/platforms/cell/interrupt.h    62
-rw-r--r--   arch/powerpc/platforms/cell/iommu.c       381
-rw-r--r--   arch/powerpc/platforms/cell/iommu.h        65
-rw-r--r--   arch/powerpc/platforms/cell/setup.c       141
-rw-r--r--   arch/powerpc/platforms/cell/smp.c         230
-rw-r--r--   arch/powerpc/platforms/cell/spider-pic.c  191
8 files changed, 1356 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
new file mode 100644
index 000000000000..55e094b96bc0
--- /dev/null
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -0,0 +1,2 @@
1obj-y += interrupt.o iommu.o setup.o spider-pic.o
2obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
new file mode 100644
index 000000000000..7fbe78a9327d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -0,0 +1,284 @@
1/*
2 * Cell Internal Interrupt Controller
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/config.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/percpu.h>
27#include <linux/types.h>
28
29#include <asm/io.h>
30#include <asm/pgtable.h>
31#include <asm/prom.h>
32#include <asm/ptrace.h>
33
34#include "interrupt.h"
35
36struct iic_pending_bits {
37 u32 data;
38 u8 flags;
39 u8 class;
40 u8 source;
41 u8 prio;
42};
43
44enum iic_pending_flags {
45 IIC_VALID = 0x80,
46 IIC_IPI = 0x40,
47};
48
49struct iic_regs {
50 struct iic_pending_bits pending;
51 struct iic_pending_bits pending_destr;
52 u64 generate;
53 u64 prio;
54};
55
56struct iic {
57 struct iic_regs __iomem *regs;
58};
59
60static DEFINE_PER_CPU(struct iic, iic);
61
62void iic_local_enable(void)
63{
64 out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
65}
66
67void iic_local_disable(void)
68{
69 out_be64(&__get_cpu_var(iic).regs->prio, 0x0);
70}
71
72static unsigned int iic_startup(unsigned int irq)
73{
74 return 0;
75}
76
77static void iic_enable(unsigned int irq)
78{
79 iic_local_enable();
80}
81
82static void iic_disable(unsigned int irq)
83{
84}
85
86static void iic_end(unsigned int irq)
87{
88 iic_local_enable();
89}
90
91static struct hw_interrupt_type iic_pic = {
92 .typename = " CELL-IIC ",
93 .startup = iic_startup,
94 .enable = iic_enable,
95 .disable = iic_disable,
96 .end = iic_end,
97};
98
99static int iic_external_get_irq(struct iic_pending_bits pending)
100{
101 int irq;
102 unsigned char node, unit;
103
104 node = pending.source >> 4;
105 unit = pending.source & 0xf;
106 irq = -1;
107
108 /*
109 * This mapping is specific to the Cell Broadband
110 * Engine. We might need to get the numbers
111 * from the device tree to support future CPUs.
112 */
113 switch (unit) {
114 case 0x00:
115 case 0x0b:
116 /*
117 * One of these units can be connected
118 * to an external interrupt controller.
119 */
120 if (pending.prio > 0x3f ||
121 pending.class != 2)
122 break;
123 irq = IIC_EXT_OFFSET
124 + spider_get_irq(pending.prio + node * IIC_NODE_STRIDE)
125 + node * IIC_NODE_STRIDE;
126 break;
127 case 0x01 ... 0x04:
128 case 0x07 ... 0x0a:
129 /*
130 * These units are connected to the SPEs
131 */
132 if (pending.class > 2)
133 break;
134 irq = IIC_SPE_OFFSET
135 + pending.class * IIC_CLASS_STRIDE
136 + node * IIC_NODE_STRIDE
137 + unit;
138 break;
139 }
140 if (irq == -1)
141 printk(KERN_WARNING "Unexpected interrupt class %02x, "
142 "source %02x, prio %02x, cpu %02x\n", pending.class,
143 pending.source, pending.prio, smp_processor_id());
144 return irq;
145}
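As a rough cross-check of the mapping above, this user-space sketch recomputes an SPE interrupt number from the IIC_* constants defined in interrupt.h; the sample node/class/unit values are made up.

/* Illustrative model of the SPE branch of iic_external_get_irq();
 * constants are copied from interrupt.h, sample values are hypothetical. */
#include <assert.h>
#include <stdio.h>

enum {
	IIC_SPE_OFFSET   = 0x40,
	IIC_CLASS_STRIDE = 0x10,
	IIC_NODE_STRIDE  = 0x80,
};

static int spe_irq(int node, int class, int unit)
{
	return IIC_SPE_OFFSET
		+ class * IIC_CLASS_STRIDE
		+ node * IIC_NODE_STRIDE
		+ unit;
}

int main(void)
{
	/* class 1 interrupt from SPE unit 3 on node 0 lands at 0x53,
	 * matching the "51-5a ... SPU Class 1" row of the map in
	 * interrupt.h; the same unit on node 1 is one node stride up */
	assert(spe_irq(0, 1, 3) == 0x53);
	assert(spe_irq(1, 1, 3) == 0x53 + IIC_NODE_STRIDE);
	printf("spe irqs: %#x %#x\n", spe_irq(0, 1, 3), spe_irq(1, 1, 3));
	return 0;
}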
146
147/* Get an IRQ number from the pending state register of the IIC */
148int iic_get_irq(struct pt_regs *regs)
149{
150 struct iic *iic;
151 int irq;
152 struct iic_pending_bits pending;
153
154 iic = &__get_cpu_var(iic);
155 *(unsigned long *) &pending =
156 in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
157
158 irq = -1;
159 if (pending.flags & IIC_VALID) {
160 if (pending.flags & IIC_IPI) {
161 irq = IIC_IPI_OFFSET + (pending.prio >> 4);
162/*
163 if (irq > 0x80)
164 printk(KERN_WARNING "Unexpected IPI prio %02x"
165 "on CPU %02x\n", pending.prio,
166 smp_processor_id());
167*/
168 } else {
169 irq = iic_external_get_irq(pending);
170 }
171 }
172 return irq;
173}
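iic_get_irq() fills struct iic_pending_bits by casting the 64-bit register value, so on the big-endian Cell the fields are just consecutive bytes of that value. A minimal user-space model of the decode, using a made-up register value:

#include <stdint.h>
#include <stdio.h>

enum { IIC_VALID = 0x80, IIC_IPI = 0x40, IIC_IPI_OFFSET = 0x70 };

struct pending {
	uint32_t data;
	uint8_t flags, class, source, prio;
};

/* big-endian layout: data is bits 63..32, then flags, class, source, prio */
static struct pending decode(uint64_t reg)
{
	struct pending p;

	p.data   = reg >> 32;
	p.flags  = (reg >> 24) & 0xff;
	p.class  = (reg >> 16) & 0xff;
	p.source = (reg >>  8) & 0xff;
	p.prio   =  reg        & 0xff;
	return p;
}

int main(void)
{
	uint64_t reg = 0x00000000c00000f0ull;	/* hypothetical: valid IPI, prio 0xf0 */
	struct pending p = decode(reg);

	if ((p.flags & IIC_VALID) && (p.flags & IIC_IPI))
		printf("IPI -> irq %#x\n", IIC_IPI_OFFSET + (p.prio >> 4));
	return 0;
}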
174
175static struct iic_regs __iomem *find_iic(int cpu)
176{
177 struct device_node *np;
178 int nodeid = cpu / 2;
179 unsigned long regs;
180 struct iic_regs __iomem *iic_regs;
181
182 for (np = of_find_node_by_type(NULL, "cpu");
183 np;
184 np = of_find_node_by_type(np, "cpu")) {
185 if (nodeid == *(int *)get_property(np, "node-id", NULL))
186 break;
187 }
188
189 if (!np) {
190 printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
191 iic_regs = NULL;
192 } else {
193 regs = *(long *)get_property(np, "iic", NULL);
194
195 /* hack until we have decided on the devtree info */
196 regs += 0x400;
197 if (cpu & 1)
198 regs += 0x20;
199
200 printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
201 iic_regs = __ioremap(regs, sizeof(struct iic_regs),
202 _PAGE_NO_CACHE);
203 }
204 return iic_regs;
205}
206
207#ifdef CONFIG_SMP
208
209/* Use the highest interrupt priorities for IPI */
210static inline int iic_ipi_to_irq(int ipi)
211{
212 return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi;
213}
214
215static inline int iic_irq_to_ipi(int irq)
216{
217 return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET);
218}
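A quick round-trip check of the IPI mapping above, using the constants from interrupt.h; the last assertion reflects the "highest interrupt priorities" comment, since iic_cause_IPI() generates message n at priority (IIC_NUM_IPIS - 1 - n) << 4.

#include <assert.h>

enum { IIC_IPI_OFFSET = 0x70, IIC_NUM_IPIS = 0x10 };

static int ipi_to_irq(int ipi) { return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi; }
static int irq_to_ipi(int irq) { return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET); }

int main(void)
{
	int ipi;

	/* the two helpers are exact inverses over the IPI block */
	for (ipi = 0; ipi < IIC_NUM_IPIS; ipi++)
		assert(irq_to_ipi(ipi_to_irq(ipi)) == ipi);
	/* message 0 has the highest priority and the highest IRQ, 0x7f */
	assert(ipi_to_irq(0) == 0x7f);
	return 0;
}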
219
220void iic_setup_cpu(void)
221{
222 out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
223}
224
225void iic_cause_IPI(int cpu, int mesg)
226{
227 out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4);
228}
229
230static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
231{
232 smp_message_recv(iic_irq_to_ipi(irq), regs);
233 return IRQ_HANDLED;
234}
235
236static void iic_request_ipi(int ipi, const char *name)
237{
238 int irq;
239
240 irq = iic_ipi_to_irq(ipi);
241 /* IPIs are marked SA_INTERRUPT as they must run with irqs
242 * disabled */
243 get_irq_desc(irq)->handler = &iic_pic;
244 get_irq_desc(irq)->status |= IRQ_PER_CPU;
245 request_irq(irq, iic_ipi_action, SA_INTERRUPT, name, NULL);
246}
247
248void iic_request_IPIs(void)
249{
250 iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
251 iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
252#ifdef CONFIG_DEBUGGER
253 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
254#endif /* CONFIG_DEBUGGER */
255}
256#endif /* CONFIG_SMP */
257
258static void iic_setup_spe_handlers(void)
259{
260 int be, isrc;
261
262 /* Assume two threads per BE are present */
263 for (be=0; be < num_present_cpus() / 2; be++) {
264 for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) {
265 int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
266 get_irq_desc(irq)->handler = &iic_pic;
267 }
268 }
269}
270
271void iic_init_IRQ(void)
272{
273 int cpu, irq_offset;
274 struct iic *iic;
275
276 irq_offset = 0;
277 for_each_cpu(cpu) {
278 iic = &per_cpu(iic, cpu);
279 iic->regs = find_iic(cpu);
280 if (iic->regs)
281 out_be64(&iic->regs->prio, 0xff);
282 }
283 iic_setup_spe_handlers();
284}
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
new file mode 100644
index 000000000000..37d58e6fd0c6
--- /dev/null
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -0,0 +1,62 @@
1#ifndef ASM_CELL_PIC_H
2#define ASM_CELL_PIC_H
3#ifdef __KERNEL__
4/*
5 * Mapping of IIC pending bits into per-node
6 * interrupt numbers.
7 *
8 * IRQ FF CC SS PP FF CC SS PP Description
9 *
10 * 00-3f 80 02 +0 00 - 80 02 +0 3f South Bridge
11 * 00-3f 80 02 +b 00 - 80 02 +b 3f South Bridge
12 * 41-4a 80 00 +1 ** - 80 00 +a ** SPU Class 0
13 * 51-5a 80 01 +1 ** - 80 01 +a ** SPU Class 1
14 * 61-6a 80 02 +1 ** - 80 02 +a ** SPU Class 2
15 * 70-7f C0 ** ** 00 - C0 ** ** 0f IPI
16 *
17 * F flags
18 * C class
19 * S source
20 * P Priority
21 * + node number
22 * * don't care
23 *
24 * A node consists of a Cell Broadband Engine and an optional
25 * south bridge device providing a maximum of 64 IRQs.
26 * The south bridge may be connected to either IOIF0
27 * or IOIF1.
28 * Each SPE is represented as three IRQ lines, one per
29 * interrupt class.
30 * 16 IRQ numbers are reserved for inter-processor
31 * interrupts, although these are only used in the
32 * range of the first node.
33 *
34 * This scheme needs 128 IRQ numbers per BIF node ID,
35 * which means that with the total of 512 lines
36 * available, we can have a maximum of four nodes.
37 */
38
39enum {
40 IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */
41 IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */
42 IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */
43 IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */
44 IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */
45 IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */
46 IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */
47};
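The arithmetic behind the layout described above can be checked mechanically; a small stand-alone sketch using the same constants:

#include <assert.h>

enum {
	IIC_EXT_OFFSET   = 0x00,
	IIC_NUM_EXT      = 0x40,
	IIC_SPE_OFFSET   = 0x40,
	IIC_CLASS_STRIDE = 0x10,
	IIC_IPI_OFFSET   = 0x70,
	IIC_NUM_IPIS     = 0x10,
	IIC_NODE_STRIDE  = 0x80,
};

int main(void)
{
	/* the south bridge block ends where the SPE block begins */
	assert(IIC_EXT_OFFSET + IIC_NUM_EXT == IIC_SPE_OFFSET);
	/* three SPE classes end where the IPI block begins */
	assert(IIC_SPE_OFFSET + 3 * IIC_CLASS_STRIDE == IIC_IPI_OFFSET);
	/* the IPI block ends exactly at the node stride */
	assert(IIC_IPI_OFFSET + IIC_NUM_IPIS == IIC_NODE_STRIDE);
	/* 512 available lines / 128 per node = the four nodes mentioned above */
	assert(512 / IIC_NODE_STRIDE == 4);
	return 0;
}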
48
49extern void iic_init_IRQ(void);
50extern int iic_get_irq(struct pt_regs *regs);
51extern void iic_cause_IPI(int cpu, int mesg);
52extern void iic_request_IPIs(void);
53extern void iic_setup_cpu(void);
54extern void iic_local_enable(void);
55extern void iic_local_disable(void);
56
57
58extern void spider_init_IRQ(void);
59extern int spider_get_irq(unsigned long int_pending);
60
61#endif
62#endif /* ASM_CELL_PIC_H */
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
new file mode 100644
index 000000000000..74f999b4ac9e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -0,0 +1,381 @@
1/*
2 * IOMMU implementation for Cell Broadband Processor Architecture
3 * We just establish a linear mapping at boot by setting all the
4 * IOPT cache entries in the CPU.
5 * The mapping functions should be identical to pci_direct_iommu,
6 * except for the handling of the high order bit that is required
7 * by the Spider bridge. These should be split into a separate
8 * file at the point where we get a different bridge chip.
9 *
10 * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH,
11 * Arnd Bergmann <arndb@de.ibm.com>
12 *
13 * Based on linear mapping
14 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 */
21
22#undef DEBUG
23
24#include <linux/kernel.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/string.h>
28#include <linux/init.h>
29#include <linux/bootmem.h>
30#include <linux/mm.h>
31#include <linux/dma-mapping.h>
32
33#include <asm/sections.h>
34#include <asm/iommu.h>
35#include <asm/io.h>
36#include <asm/prom.h>
37#include <asm/pci-bridge.h>
38#include <asm/machdep.h>
39#include <asm/pmac_feature.h>
40#include <asm/abs_addr.h>
41#include <asm/system.h>
42#include <asm/ppc-pci.h>
43
44#include "iommu.h"
45
46static inline unsigned long
47get_iopt_entry(unsigned long real_address, unsigned long ioid,
48 unsigned long prot)
49{
50 return (prot & IOPT_PROT_MASK)
51 | (IOPT_COHERENT)
52 | (IOPT_ORDER_VC)
53 | (real_address & IOPT_RPN_MASK)
54 | (ioid & IOPT_IOID_MASK);
55}
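A user-space model of get_iopt_entry(), assembling an IOPT word from the masks defined in iommu.h; the sample real address is arbitrary, while 0x48a is the IOID hard-coded later in this file.

#include <stdint.h>
#include <stdio.h>

#define IOPT_PROT_MASK 0xc000000000000000ul
#define IOPT_PROT_RW   0xc000000000000000ul
#define IOPT_COHERENT  0x2000000000000000ul
#define IOPT_ORDER_VC  0x1800000000000000ul
#define IOPT_RPN_MASK  0x000003fffffff000ul
#define IOPT_IOID_MASK 0x00000000000007fful

static uint64_t iopt_entry(uint64_t real, uint64_t ioid, uint64_t prot)
{
	return (prot & IOPT_PROT_MASK) | IOPT_COHERENT | IOPT_ORDER_VC
		| (real & IOPT_RPN_MASK) | (ioid & IOPT_IOID_MASK);
}

int main(void)
{
	uint64_t e = iopt_entry(0x20000000ul, 0x48a, IOPT_PROT_RW);

	/* decode the fields back out of the entry */
	printf("entry %016llx: rpn %llx, ioid %llx\n",
	       (unsigned long long)e,
	       (unsigned long long)(e & IOPT_RPN_MASK) >> 12,
	       (unsigned long long)(e & IOPT_IOID_MASK));
	return 0;
}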
56
57typedef struct {
58 unsigned long val;
59} ioste;
60
61static inline ioste
62mk_ioste(unsigned long val)
63{
64 ioste ioste = { .val = val, };
65 return ioste;
66}
67
68static inline ioste
69get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size)
70{
71 unsigned long ps;
72 unsigned long iostep;
73 unsigned long nnpt;
74 unsigned long shift;
75
76 switch (page_size) {
77 case 0x1000000:
78 ps = IOST_PS_16M;
79 nnpt = 0; /* one page per segment */
80 shift = 5; /* segment has 16 iopt entries */
81 break;
82
83 case 0x100000:
84 ps = IOST_PS_1M;
85 nnpt = 0; /* one page per segment */
86 shift = 1; /* segment has 256 iopt entries */
87 break;
88
89 case 0x10000:
90 ps = IOST_PS_64K;
91 nnpt = 0x07; /* 8 pages per io page table */
92 shift = 0; /* all entries are used */
93 break;
94
95 case 0x1000:
96 ps = IOST_PS_4K;
97 nnpt = 0x7f; /* 128 pages per io page table */
98 shift = 0; /* all entries are used */
99 break;
100
101 default: /* not a known compile time constant */
102 {
103 /* BUILD_BUG_ON() is not usable here */
104 extern void __get_iost_entry_bad_page_size(void);
105 __get_iost_entry_bad_page_size();
106 }
107 break;
108 }
109
110 iostep = iopt_base +
111 /* need 8 bytes per iopte */
112 (((io_address / page_size * 8)
113 /* align io page tables on 4k page boundaries */
114 << shift)
115 /* nnpt+1 pages go into each iopt */
116 & ~(nnpt << 12));
117
118 nnpt++; /* this seems to work, but the documentation is not clear
119 about whether we put nnpt or nnpt-1 into the ioste bits.
120 In theory, this can't work for 4k pages. */
121 return mk_ioste(IOST_VALID_MASK
122 | (iostep & IOST_PT_BASE_MASK)
123 | ((nnpt << 5) & IOST_NNPT_MASK)
124 | (ps & IOST_PS_MASK));
125}
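A worked example of get_iost_entry() for the 16 MB page size that cell_map_iommu() actually uses (ps = IOST_PS_16M, nnpt = 0, shift = 5). The iopt base below is the value cell_map_iommu() passes in; the addresses are sample 256 MB segment starts.

#include <stdint.h>
#include <stdio.h>

#define IOST_VALID_MASK   0x8000000000000000ul
#define IOST_PT_BASE_MASK 0x000003fffffff000ul
#define IOST_NNPT_MASK    0x0000000000000fe0ul
#define IOST_PS_MASK      0x000000000000000ful
#define IOST_PS_16M       0x7

static uint64_t ioste_16m(uint64_t iopt_base, uint64_t io_address)
{
	uint64_t nnpt = 0, shift = 5;	/* one table page per segment, 16 ioptes */
	uint64_t iostep = iopt_base +
		(((io_address / 0x1000000 * 8) << shift) & ~(nnpt << 12));

	nnpt++;				/* mirrors the nnpt++ in the kernel code */
	return IOST_VALID_MASK
		| (iostep & IOST_PT_BASE_MASK)
		| ((nnpt << 5) & IOST_NNPT_MASK)
		| (IOST_PS_16M & IOST_PS_MASK);
}

int main(void)
{
	uint64_t base = 0x10000000000ul, addr;

	/* each 256 MB segment gets an io page table 4 KB further on */
	for (addr = 0; addr < 0x40000000ul; addr += 0x10000000ul)
		printf("addr %09llx -> ioste %016llx\n",
		       (unsigned long long)addr,
		       (unsigned long long)ioste_16m(base, addr));
	return 0;
}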
126
127/* compute the address of an io pte */
128static inline unsigned long
129get_ioptep(ioste iost_entry, unsigned long io_address)
130{
131 unsigned long iopt_base;
132 unsigned long page_size;
133 unsigned long page_number;
134 unsigned long iopt_offset;
135
136 iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
137 page_size = iost_entry.val & IOST_PS_MASK;
138
139 /* decode page size to compute page number */
140 page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
141 /* page number is an offset into the io page table */
142 iopt_offset = (page_number << 3) & 0x7fff8ul;
143 return iopt_base + iopt_offset;
144}
145
146/* compute the tag field of the iopt cache entry */
147static inline unsigned long
148get_ioc_tag(ioste iost_entry, unsigned long io_address)
149{
150 unsigned long iopte = get_ioptep(iost_entry, io_address);
151
152 return IOPT_VALID_MASK
153 | ((iopte & 0x00000000000000ff8ul) >> 3)
154 | ((iopte & 0x0000003fffffc0000ul) >> 9);
155}
156
157/* compute the hashed 6 bit index for the 4-way associative pte cache */
158static inline unsigned long
159get_ioc_hash(ioste iost_entry, unsigned long io_address)
160{
161 unsigned long iopte = get_ioptep(iost_entry, io_address);
162
163 return ((iopte & 0x000000000000001f8ul) >> 3)
164 ^ ((iopte & 0x00000000000020000ul) >> 17)
165 ^ ((iopte & 0x00000000000010000ul) >> 15)
166 ^ ((iopte & 0x00000000000008000ul) >> 13)
167 ^ ((iopte & 0x00000000000004000ul) >> 11)
168 ^ ((iopte & 0x00000000000002000ul) >> 9)
169 ^ ((iopte & 0x00000000000001000ul) >> 7);
170}
171
172/* same as above, but pretend that we have a simpler 1-way associative
173 pte cache with an 8 bit index */
174static inline unsigned long
175get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
176{
177 unsigned long iopte = get_ioptep(iost_entry, io_address);
178
179 return ((iopte & 0x000000000000001f8ul) >> 3)
180 ^ ((iopte & 0x00000000000020000ul) >> 17)
181 ^ ((iopte & 0x00000000000010000ul) >> 15)
182 ^ ((iopte & 0x00000000000008000ul) >> 13)
183 ^ ((iopte & 0x00000000000004000ul) >> 11)
184 ^ ((iopte & 0x00000000000002000ul) >> 9)
185 ^ ((iopte & 0x00000000000001000ul) >> 7)
186 ^ ((iopte & 0x0000000000000c000ul) >> 8);
187}
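A sketch of the 1-way variant above: selected bits of the iopt entry address are xor-folded into an 8-bit cache index (the 4-way variant yields 6 bits). The sample addresses are arbitrary.

#include <stdint.h>
#include <stdio.h>

static unsigned int ioc_hash_1way(uint64_t iopte)
{
	return ((iopte & 0x1f8ul) >> 3)
		^ ((iopte & 0x20000ul) >> 17)
		^ ((iopte & 0x10000ul) >> 15)
		^ ((iopte & 0x08000ul) >> 13)
		^ ((iopte & 0x04000ul) >> 11)
		^ ((iopte & 0x02000ul) >> 9)
		^ ((iopte & 0x01000ul) >> 7)
		^ ((iopte & 0x0c000ul) >> 8);
}

int main(void)
{
	uint64_t iopte;

	/* consecutive 8-byte iopt entries map to consecutive indices
	 * until the xor-folded high bits start to change */
	for (iopte = 0x1000; iopte < 0x1040; iopte += 8)
		printf("iopte %05llx -> index %02x\n",
		       (unsigned long long)iopte, ioc_hash_1way(iopte));
	return 0;
}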
188
189static inline ioste
190get_iost_cache(void __iomem *base, unsigned long index)
191{
192 unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
193 return mk_ioste(in_be64(&p[index]));
194}
195
196static inline void
197set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
198{
199 unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
200 pr_debug("ioste %02lx was %016lx, store %016lx", index,
201 get_iost_cache(base, index).val, ste.val);
202 out_be64(&p[index], ste.val);
203 pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
204}
205
206static inline unsigned long
207get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag)
208{
209 unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR);
210 unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG);
211
212 *tag = tags[index];
213 rmb();
214 return *p;
215}
216
217static inline void
218set_iopt_cache(void __iomem *base, unsigned long index,
219 unsigned long tag, unsigned long val)
220{
221 unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
222 unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
    unsigned long oldtag;
223 pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n",
224 index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag);
225
226 out_be64(p, val);
227 out_be64(&tags[index], tag);
228}
229
230static inline void
231set_iost_origin(void __iomem *base)
232{
233 unsigned long __iomem *p = base + IOC_ST_ORIGIN;
234 unsigned long origin = IOSTO_ENABLE | IOSTO_SW;
235
236 pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin);
237 out_be64(p, origin);
238}
239
240static inline void
241set_iocmd_config(void __iomem *base)
242{
243 unsigned long __iomem *p = base + 0xc00;
244 unsigned long conf;
245
246 conf = in_be64(p);
247 pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE);
248 out_be64(p, conf | IOCMD_CONF_TE);
249}
250
251/* FIXME: get these from the device tree */
252#define ioc_base 0x20000511000ull
253#define ioc_mmio_base 0x20000510000ull
254#define ioid 0x48a
255#define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */
256#define io_page_size 0x1000000
257
258static unsigned long map_iopt_entry(unsigned long address)
259{
260 switch (address >> 20) {
261 case 0x600:
262 address = 0x24020000000ull; /* spider i/o */
263 break;
264 default:
265 address += iopt_phys_offset;
266 break;
267 }
268
269 return get_iopt_entry(address, ioid, IOPT_PROT_RW);
270}
271
272static void iommu_bus_setup_null(struct pci_bus *b) { }
273static void iommu_dev_setup_null(struct pci_dev *d) { }
274
275/* initialize the iommu to support a simple linear mapping
276 * for each DMA window used by any device. For now, we
277 * happen to know that there is only one DMA window in use,
278 * starting at iopt_phys_offset. */
279static void cell_map_iommu(void)
280{
281 unsigned long address;
282 void __iomem *base;
283 ioste ioste;
284 unsigned long index;
285
286 base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE);
287 pr_debug("%lx mapped to %p\n", ioc_base, base);
288 set_iocmd_config(base);
289 iounmap(base);
290
291 base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE);
292 pr_debug("%lx mapped to %p\n", ioc_mmio_base, base);
293
294 set_iost_origin(base);
295
296 for (address = 0; address < 0x100000000ul; address += io_page_size) {
297 ioste = get_iost_entry(0x10000000000ul, address, io_page_size);
298 if ((address & 0xfffffff) == 0) /* segment start */
299 set_iost_cache(base, address >> 28, ioste);
300 index = get_ioc_hash_1way(ioste, address);
301 pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
302 address, index, ioste.val);
303 set_iopt_cache(base,
304 get_ioc_hash_1way(ioste, address),
305 get_ioc_tag(ioste, address),
306 map_iopt_entry(address));
307 }
308 iounmap(base);
309}
310
311
312static void *cell_alloc_coherent(struct device *hwdev, size_t size,
313 dma_addr_t *dma_handle, gfp_t flag)
314{
315 void *ret;
316
317 ret = (void *)__get_free_pages(flag, get_order(size));
318 if (ret != NULL) {
319 memset(ret, 0, size);
320 *dma_handle = virt_to_abs(ret) | CELL_DMA_VALID;
321 }
322 return ret;
323}
324
325static void cell_free_coherent(struct device *hwdev, size_t size,
326 void *vaddr, dma_addr_t dma_handle)
327{
328 free_pages((unsigned long)vaddr, get_order(size));
329}
330
331static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
332 size_t size, enum dma_data_direction direction)
333{
334 return virt_to_abs(ptr) | CELL_DMA_VALID;
335}
336
337static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
338 size_t size, enum dma_data_direction direction)
339{
340}
341
342static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
343 int nents, enum dma_data_direction direction)
344{
345 int i;
346
347 for (i = 0; i < nents; i++, sg++) {
348 sg->dma_address = (page_to_phys(sg->page) + sg->offset)
349 | CELL_DMA_VALID;
350 sg->dma_length = sg->length;
351 }
352
353 return nents;
354}
355
356static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
357 int nents, enum dma_data_direction direction)
358{
359}
360
361static int cell_dma_supported(struct device *dev, u64 mask)
362{
363 return mask < 0x100000000ull;
364}
365
366void cell_init_iommu(void)
367{
368 cell_map_iommu();
369
370 /* Direct I/O, IOMMU off */
371 ppc_md.iommu_dev_setup = iommu_dev_setup_null;
372 ppc_md.iommu_bus_setup = iommu_bus_setup_null;
373
374 pci_dma_ops.alloc_coherent = cell_alloc_coherent;
375 pci_dma_ops.free_coherent = cell_free_coherent;
376 pci_dma_ops.map_single = cell_map_single;
377 pci_dma_ops.unmap_single = cell_unmap_single;
378 pci_dma_ops.map_sg = cell_map_sg;
379 pci_dma_ops.unmap_sg = cell_unmap_sg;
380 pci_dma_ops.dma_supported = cell_dma_supported;
381}
diff --git a/arch/powerpc/platforms/cell/iommu.h b/arch/powerpc/platforms/cell/iommu.h
new file mode 100644
index 000000000000..490d77abfe85
--- /dev/null
+++ b/arch/powerpc/platforms/cell/iommu.h
@@ -0,0 +1,65 @@
1#ifndef CELL_IOMMU_H
2#define CELL_IOMMU_H
3
4/* some constants */
5enum {
6 /* segment table entries */
7 IOST_VALID_MASK = 0x8000000000000000ul,
8 IOST_TAG_MASK = 0x3000000000000000ul,
9 IOST_PT_BASE_MASK = 0x000003fffffff000ul,
10 IOST_NNPT_MASK = 0x0000000000000fe0ul,
11 IOST_PS_MASK = 0x000000000000000ful,
12
13 IOST_PS_4K = 0x1,
14 IOST_PS_64K = 0x3,
15 IOST_PS_1M = 0x5,
16 IOST_PS_16M = 0x7,
17
18 /* iopt tag register */
19 IOPT_VALID_MASK = 0x0000000200000000ul,
20 IOPT_TAG_MASK = 0x00000001fffffffful,
21
22 /* iopt cache register */
23 IOPT_PROT_MASK = 0xc000000000000000ul,
24 IOPT_PROT_NONE = 0x0000000000000000ul,
25 IOPT_PROT_READ = 0x4000000000000000ul,
26 IOPT_PROT_WRITE = 0x8000000000000000ul,
27 IOPT_PROT_RW = 0xc000000000000000ul,
28 IOPT_COHERENT = 0x2000000000000000ul,
29
30 IOPT_ORDER_MASK = 0x1800000000000000ul,
31 /* order access to same IOID/VC on same address */
32 IOPT_ORDER_ADDR = 0x0800000000000000ul,
33 /* similar, but only after a write access */
34 IOPT_ORDER_WRITES = 0x1000000000000000ul,
35 /* Order all accesses to same IOID/VC */
36 IOPT_ORDER_VC = 0x1800000000000000ul,
37
38 IOPT_RPN_MASK = 0x000003fffffff000ul,
39 IOPT_HINT_MASK = 0x0000000000000800ul,
40 IOPT_IOID_MASK = 0x00000000000007fful,
41
42 IOSTO_ENABLE = 0x8000000000000000ul,
43 IOSTO_ORIGIN = 0x000003fffffff000ul,
44 IOSTO_HW = 0x0000000000000800ul,
45 IOSTO_SW = 0x0000000000000400ul,
46
47 IOCMD_CONF_TE = 0x0000800000000000ul,
48
49 /* memory mapped registers */
50 IOC_PT_CACHE_DIR = 0x000,
51 IOC_ST_CACHE_DIR = 0x800,
52 IOC_PT_CACHE_REG = 0x910,
53 IOC_ST_ORIGIN = 0x918,
54 IOC_CONF = 0x930,
55
56 /* The high bit needs to be set on every DMA address,
57 only 2GB are addressable */
58 CELL_DMA_VALID = 0x80000000,
59 CELL_DMA_MASK = 0x7fffffff,
60};
61
62
63void cell_init_iommu(void);
64
65#endif
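The DMA mapping functions in iommu.c implement the convention described by the last two constants: a bus address is just the physical address with the high bit set, so only the low 2 GB are reachable. A minimal sketch of that transform, with an arbitrary sample address:

#include <assert.h>
#include <stdint.h>

#define CELL_DMA_VALID 0x80000000u
#define CELL_DMA_MASK  0x7fffffffu

static uint32_t phys_to_bus(uint32_t phys) { return phys | CELL_DMA_VALID; }
static uint32_t bus_to_phys(uint32_t bus)  { return bus & CELL_DMA_MASK; }

int main(void)
{
	uint32_t phys = 0x01234000;	/* made-up physical address below 2 GB */

	assert(bus_to_phys(phys_to_bus(phys)) == phys);
	assert(phys_to_bus(phys) & CELL_DMA_VALID);
	return 0;
}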
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
new file mode 100644
index 000000000000..9a495634d0c2
--- /dev/null
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -0,0 +1,141 @@
1/*
2 * linux/arch/powerpc/platforms/cell/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas
6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 * Modified by PPC64 Team, IBM Corp
8 * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15#undef DEBUG
16
17#include <linux/config.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/user.h>
25#include <linux/reboot.h>
26#include <linux/init.h>
27#include <linux/delay.h>
28#include <linux/irq.h>
29#include <linux/seq_file.h>
30#include <linux/root_dev.h>
31#include <linux/console.h>
32
33#include <asm/mmu.h>
34#include <asm/processor.h>
35#include <asm/io.h>
36#include <asm/pgtable.h>
37#include <asm/prom.h>
38#include <asm/rtas.h>
39#include <asm/pci-bridge.h>
40#include <asm/iommu.h>
41#include <asm/dma.h>
42#include <asm/machdep.h>
43#include <asm/time.h>
44#include <asm/nvram.h>
45#include <asm/cputable.h>
46#include <asm/ppc-pci.h>
47#include <asm/irq.h>
48
49#include "interrupt.h"
50#include "iommu.h"
51
52#ifdef DEBUG
53#define DBG(fmt...) udbg_printf(fmt)
54#else
55#define DBG(fmt...)
56#endif
57
58void cell_show_cpuinfo(struct seq_file *m)
59{
60 struct device_node *root;
61 const char *model = "";
62
63 root = of_find_node_by_path("/");
64 if (root)
65 model = get_property(root, "model", NULL);
66 seq_printf(m, "machine\t\t: CHRP %s\n", model);
67 of_node_put(root);
68}
69
70static void cell_progress(char *s, unsigned short hex)
71{
72 printk("*** %04x : %s\n", hex, s ? s : "");
73}
74
75static void __init cell_setup_arch(void)
76{
77 ppc_md.init_IRQ = iic_init_IRQ;
78 ppc_md.get_irq = iic_get_irq;
79
80#ifdef CONFIG_SMP
81 smp_init_cell();
82#endif
83
84 /* init to some ~sane value until calibrate_delay() runs */
85 loops_per_jiffy = 50000000;
86
87 if (ROOT_DEV == 0) {
88 printk("No ramdisk, default root is /dev/hda2\n");
89 ROOT_DEV = Root_HDA2;
90 }
91
92 /* Find and initialize PCI host bridges */
93 init_pci_config_tokens();
94 find_and_init_phbs();
95 spider_init_IRQ();
96#ifdef CONFIG_DUMMY_CONSOLE
97 conswitchp = &dummy_con;
98#endif
99
100 mmio_nvram_init();
101}
102
103/*
104 * Early initialization. Relocation is on but do not reference unbolted pages
105 */
106static void __init cell_init_early(void)
107{
108 DBG(" -> cell_init_early()\n");
109
110 hpte_init_native();
111
112 cell_init_iommu();
113
114 ppc64_interrupt_controller = IC_CELL_PIC;
115
116 DBG(" <- cell_init_early()\n");
117}
118
119
120static int __init cell_probe(int platform)
121{
122 if (platform != PLATFORM_CELL)
123 return 0;
124
125 return 1;
126}
127
128struct machdep_calls __initdata cell_md = {
129 .probe = cell_probe,
130 .setup_arch = cell_setup_arch,
131 .init_early = cell_init_early,
132 .show_cpuinfo = cell_show_cpuinfo,
133 .restart = rtas_restart,
134 .power_off = rtas_power_off,
135 .halt = rtas_halt,
136 .get_boot_time = rtas_get_boot_time,
137 .get_rtc_time = rtas_get_rtc_time,
138 .set_rtc_time = rtas_set_rtc_time,
139 .calibrate_decr = generic_calibrate_decr,
140 .progress = cell_progress,
141};
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
new file mode 100644
index 000000000000..de96eadf419d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -0,0 +1,230 @@
1/*
2 * SMP support for BPA machines.
3 *
4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
6 *
7 * Plus various changes from other IBM teams...
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22#include <linux/interrupt.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/cache.h>
27#include <linux/err.h>
28#include <linux/sysdev.h>
29#include <linux/cpu.h>
30
31#include <asm/ptrace.h>
32#include <asm/atomic.h>
33#include <asm/irq.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/smp.h>
39#include <asm/paca.h>
40#include <asm/time.h>
41#include <asm/machdep.h>
42#include <asm/cputable.h>
43#include <asm/firmware.h>
44#include <asm/system.h>
45#include <asm/rtas.h>
46
47#include "interrupt.h"
48
49#ifdef DEBUG
50#define DBG(fmt...) udbg_printf(fmt)
51#else
52#define DBG(fmt...)
53#endif
54
55/*
56 * The primary thread of each non-boot processor is recorded here before
57 * smp init.
58 */
59static cpumask_t of_spin_map;
60
61extern void pSeries_secondary_smp_init(unsigned long);
62
63/**
64 * smp_startup_cpu() - start the given cpu
65 *
66 * At boot time, there is nothing to do for primary threads which were
67 * started from Open Firmware. For anything else, call RTAS with the
68 * appropriate start location.
69 *
70 * Returns:
71 * 0 - failure
72 * 1 - success
73 */
74static inline int __devinit smp_startup_cpu(unsigned int lcpu)
75{
76 int status;
77 unsigned long start_here = __pa((u32)*((unsigned long *)
78 pSeries_secondary_smp_init));
79 unsigned int pcpu;
80 int start_cpu;
81
82 if (cpu_isset(lcpu, of_spin_map))
83 /* Already started by OF and sitting in spin loop */
84 return 1;
85
86 pcpu = get_hard_smp_processor_id(lcpu);
87
88 /* Fixup atomic count: it exited inside IRQ handler. */
89 paca[lcpu].__current->thread_info->preempt_count = 0;
90
91 /*
92 * If the RTAS start-cpu token does not exist then presume the
93 * cpu is already spinning.
94 */
95 start_cpu = rtas_token("start-cpu");
96 if (start_cpu == RTAS_UNKNOWN_SERVICE)
97 return 1;
98
99 status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
100 if (status != 0) {
101 printk(KERN_ERR "start-cpu failed: %i\n", status);
102 return 0;
103 }
104
105 return 1;
106}
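The start_here computation above dereferences the function symbol because, under the ppc64 ELFv1 ABI, a function symbol points at a descriptor whose first doubleword is the actual entry address, and that entry address is what gets handed to RTAS. A user-space model of the idea; the descriptor contents here are made up.

#include <stdint.h>
#include <stdio.h>

/* shape of an ELFv1 function descriptor */
struct func_desc {
	uint64_t entry;	/* address of the first instruction */
	uint64_t toc;	/* TOC pointer loaded for the callee */
	uint64_t env;
};

int main(void)
{
	struct func_desc desc = { 0xc000000000008400ull, 0xc000000000a00000ull, 0 };
	/* mirrors *(unsigned long *)pSeries_secondary_smp_init */
	uint64_t entry = *(uint64_t *)&desc;

	printf("descriptor at %p, entry point %016llx\n",
	       (void *)&desc, (unsigned long long)entry);
	return 0;
}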
107
108static void smp_iic_message_pass(int target, int msg)
109{
110 unsigned int i;
111
112 if (target < NR_CPUS) {
113 iic_cause_IPI(target, msg);
114 } else {
115 for_each_online_cpu(i) {
116 if (target == MSG_ALL_BUT_SELF
117 && i == smp_processor_id())
118 continue;
119 iic_cause_IPI(i, msg);
120 }
121 }
122}
123
124static int __init smp_iic_probe(void)
125{
126 iic_request_IPIs();
127
128 return cpus_weight(cpu_possible_map);
129}
130
131static void __devinit smp_iic_setup_cpu(int cpu)
132{
133 if (cpu != boot_cpuid)
134 iic_setup_cpu();
135}
136
137static DEFINE_SPINLOCK(timebase_lock);
138static unsigned long timebase = 0;
139
140static void __devinit cell_give_timebase(void)
141{
142 spin_lock(&timebase_lock);
143 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
144 timebase = get_tb();
145 spin_unlock(&timebase_lock);
146
147 while (timebase)
148 barrier();
149 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
150}
151
152static void __devinit cell_take_timebase(void)
153{
154 while (!timebase)
155 barrier();
156 spin_lock(&timebase_lock);
157 set_tb(timebase >> 32, timebase & 0xffffffff);
158 timebase = 0;
159 spin_unlock(&timebase_lock);
160}
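A rough model of the freeze/thaw handshake above, using two threads and a shared variable in place of RTAS and the real timebase register; build with -pthread. This only sketches the protocol, not the kernel's locking.

#include <pthread.h>
#include <stdio.h>

static volatile unsigned long timebase;

static void *giver(void *arg)
{
	timebase = 0x12345678ul;	/* "freeze" and publish the value */
	while (timebase)
		;			/* wait until the taker has copied it */
	return arg;			/* then "thaw" */
}

static void *taker(void *arg)
{
	unsigned long tb;

	while (!timebase)
		;			/* wait for the giver to publish */
	tb = timebase;
	timebase = 0;			/* release the giver */
	printf("took timebase %lx\n", tb);
	return arg;
}

int main(void)
{
	pthread_t g, t;

	pthread_create(&g, NULL, giver, NULL);
	pthread_create(&t, NULL, taker, NULL);
	pthread_join(g, NULL);
	pthread_join(t, NULL);
	return 0;
}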
161
162static void __devinit smp_cell_kick_cpu(int nr)
163{
164 BUG_ON(nr < 0 || nr >= NR_CPUS);
165
166 if (!smp_startup_cpu(nr))
167 return;
168
169 /*
170 * The processor is currently spinning, waiting for the
171 * cpu_start field to become non-zero. After we set cpu_start,
172 * the processor will continue on to secondary_start
173 */
174 paca[nr].cpu_start = 1;
175}
176
177static int smp_cell_cpu_bootable(unsigned int nr)
178{
179 /* Special case - we inhibit secondary thread startup
180 * during boot if the user requests it. Odd-numbered
181 * cpus are assumed to be secondary threads.
182 */
183 if (system_state < SYSTEM_RUNNING &&
184 cpu_has_feature(CPU_FTR_SMT) &&
185 !smt_enabled_at_boot && nr % 2 != 0)
186 return 0;
187
188 return 1;
189}
190static struct smp_ops_t bpa_iic_smp_ops = {
191 .message_pass = smp_iic_message_pass,
192 .probe = smp_iic_probe,
193 .kick_cpu = smp_cell_kick_cpu,
194 .setup_cpu = smp_iic_setup_cpu,
195 .cpu_bootable = smp_cell_cpu_bootable,
196};
197
198/* This is called very early */
199void __init smp_init_cell(void)
200{
201 int i;
202
203 DBG(" -> smp_init_cell()\n");
204
205 smp_ops = &bpa_iic_smp_ops;
206
207 /* Mark threads which are still spinning in hold loops. */
208 if (cpu_has_feature(CPU_FTR_SMT)) {
209 for_each_present_cpu(i) {
210 if (i % 2 == 0)
211 /*
212 * Even-numbered logical cpus correspond to
213 * primary threads.
214 */
215 cpu_set(i, of_spin_map);
216 }
217 } else {
218 of_spin_map = cpu_present_map;
219 }
220
221 cpu_clear(boot_cpuid, of_spin_map);
222
223 /* Non-lpar has additional take/give timebase */
224 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
225 smp_ops->give_timebase = cell_give_timebase;
226 smp_ops->take_timebase = cell_take_timebase;
227 }
228
229 DBG(" <- smp_init_cell()\n");
230}
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
new file mode 100644
index 000000000000..e74132188bdf
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -0,0 +1,191 @@
1/*
2 * External Interrupt Controller on Spider South Bridge
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/interrupt.h>
24#include <linux/irq.h>
25
26#include <asm/pgtable.h>
27#include <asm/prom.h>
28#include <asm/io.h>
29
30#include "interrupt.h"
31
32/* register layout taken from Spider spec, table 7.4-4 */
33enum {
34 TIR_DEN = 0x004, /* Detection Enable Register */
35 TIR_MSK = 0x084, /* Mask Level Register */
36 TIR_EDC = 0x0c0, /* Edge Detection Clear Register */
37 TIR_PNDA = 0x100, /* Pending Register A */
38 TIR_PNDB = 0x104, /* Pending Register B */
39 TIR_CS = 0x144, /* Current Status Register */
40 TIR_LCSA = 0x150, /* Level Current Status Register A */
41 TIR_LCSB = 0x154, /* Level Current Status Register B */
42 TIR_LCSC = 0x158, /* Level Current Status Register C */
43 TIR_LCSD = 0x15c, /* Level Current Status Register D */
44 TIR_CFGA = 0x200, /* Setting Register A0 */
45 TIR_CFGB = 0x204, /* Setting Register B0 */
46 /* 0x208 ... 0x3ff Setting Register An/Bn */
47 TIR_PPNDA = 0x400, /* Packet Pending Register A */
48 TIR_PPNDB = 0x404, /* Packet Pending Register B */
49 TIR_PIERA = 0x408, /* Packet Output Error Register A */
50 TIR_PIERB = 0x40c, /* Packet Output Error Register B */
51 TIR_PIEN = 0x444, /* Packet Output Enable Register */
52 TIR_PIPND = 0x454, /* Packet Output Pending Register */
53 TIRDID = 0x484, /* Spider Device ID Register */
54 REISTIM = 0x500, /* Reissue Command Timeout Time Setting */
55 REISTIMEN = 0x504, /* Reissue Command Timeout Setting */
56 REISWAITEN = 0x508, /* Reissue Wait Control */
57};
58
59static void __iomem *spider_pics[4];
60
61static void __iomem *spider_get_pic(int irq)
62{
63 int node = irq / IIC_NODE_STRIDE;
64 irq %= IIC_NODE_STRIDE;
65
66 if (irq >= IIC_EXT_OFFSET &&
67 irq < IIC_EXT_OFFSET + IIC_NUM_EXT &&
68 spider_pics)
69 return spider_pics[node];
70 return NULL;
71}
72
73static int spider_get_nr(unsigned int irq)
74{
75 return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET;
76}
77
78static void __iomem *spider_get_irq_config(int irq)
79{
80 void __iomem *pic;
81 pic = spider_get_pic(irq);
82 return pic + TIR_CFGA + 8 * spider_get_nr(irq);
83}
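A small model of how spider_get_pic() and spider_get_nr() split a global IRQ number into a node (which spider PIC) and an offset into that PIC's 64 external lines; the constants come from interrupt.h and the sample IRQ is made up.

#include <assert.h>

enum { IIC_EXT_OFFSET = 0x00, IIC_NUM_EXT = 0x40, IIC_NODE_STRIDE = 0x80 };

static int spider_node(int irq) { return irq / IIC_NODE_STRIDE; }
static int spider_nr(int irq)   { return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; }

int main(void)
{
	/* external line 5 on node 1 sits at global irq 0x85 */
	int irq = 1 * IIC_NODE_STRIDE + IIC_EXT_OFFSET + 5;

	assert(spider_node(irq) == 1);
	assert(spider_nr(irq) == 5);
	assert(spider_nr(irq) < IIC_NUM_EXT);
	/* its per-line config registers then live at TIR_CFGA + 8 * 5 */
	return 0;
}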
84
85static void spider_enable_irq(unsigned int irq)
86{
87 void __iomem *cfg = spider_get_irq_config(irq);
88 irq = spider_get_nr(irq);
89
90 out_be32(cfg, in_be32(cfg) | 0x3107000eu);
91 out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq);
92}
93
94static void spider_disable_irq(unsigned int irq)
95{
96 void __iomem *cfg = spider_get_irq_config(irq);
97 irq = spider_get_nr(irq);
98
99 out_be32(cfg, in_be32(cfg) & ~0x30000000u);
100}
101
102static unsigned int spider_startup_irq(unsigned int irq)
103{
104 spider_enable_irq(irq);
105 return 0;
106}
107
108static void spider_shutdown_irq(unsigned int irq)
109{
110 spider_disable_irq(irq);
111}
112
113static void spider_end_irq(unsigned int irq)
114{
115 spider_enable_irq(irq);
116}
117
118static void spider_ack_irq(unsigned int irq)
119{
120 spider_disable_irq(irq);
121 iic_local_enable();
122}
123
124static struct hw_interrupt_type spider_pic = {
125 .typename = " SPIDER ",
126 .startup = spider_startup_irq,
127 .shutdown = spider_shutdown_irq,
128 .enable = spider_enable_irq,
129 .disable = spider_disable_irq,
130 .ack = spider_ack_irq,
131 .end = spider_end_irq,
132};
133
134
135int spider_get_irq(unsigned long int_pending)
136{
137 void __iomem *regs = spider_get_pic(int_pending);
138 unsigned long cs;
139 int irq;
140
141 cs = in_be32(regs + TIR_CS);
142
143 irq = cs >> 24;
144 if (irq != 63)
145 return irq;
146
147 return -1;
148}
149
150void spider_init_IRQ(void)
151{
152 int node;
153 struct device_node *dn;
154 unsigned int *property;
155 long spiderpic;
156 int n;
157
158/* FIXME: detect multiple PICs as soon as the device tree has them */
159 for (node = 0; node < 1; node++) {
160 dn = of_find_node_by_path("/");
161 n = prom_n_addr_cells(dn);
162 property = (unsigned int *) get_property(dn,
163 "platform-spider-pic", NULL);
164
165 if (!property)
166 continue;
167 for (spiderpic = 0; n > 0; --n)
168 spiderpic = (spiderpic << 32) + *property++;
169 printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic);
170 spider_pics[node] = __ioremap(spiderpic, 0x800, _PAGE_NO_CACHE);
171 for (n = 0; n < IIC_NUM_EXT; n++) {
172 int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
173 get_irq_desc(irq)->handler = &spider_pic;
174
175 /* do not mask any interrupts because of level */
176 out_be32(spider_pics[node] + TIR_MSK, 0x0);
177
178 /* disable edge detection clear */
179 /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */
180
181 /* enable interrupt packets to be output */
182 out_be32(spider_pics[node] + TIR_PIEN,
183 in_be32(spider_pics[node] + TIR_PIEN) | 0x1);
184
185 /* Enable the interrupt detection enable bit. Do this last! */
186 out_be32(spider_pics[node] + TIR_DEN,
187 in_be32(spider_pics[node] + TIR_DEN) | 0x1);
188
189 }
190 }
191}
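spider_init_IRQ() folds a variable number of 32-bit address cells from the "platform-spider-pic" property into a single 64-bit PIC base address. A stand-alone sketch of that folding; the cell values below are made up.

#include <stdint.h>
#include <stdio.h>

static uint64_t fold_cells(const uint32_t *cells, int n)
{
	uint64_t addr = 0;

	while (n-- > 0)			/* same accumulation as the for loop above */
		addr = (addr << 32) + *cells++;
	return addr;
}

int main(void)
{
	/* hypothetical property contents with #address-cells = 2 */
	uint32_t prop[] = { 0x00000200, 0x08840000 };

	printf("SPIDER addr: %llx\n", (unsigned long long)fold_cells(prop, 2));
	return 0;
}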