Diffstat (limited to 'arch/ppc64/kernel')
 29 files changed, 351 insertions(+), 5032 deletions(-)
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 327c08ce4291..c441aebe7648 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -12,8 +12,7 @@ obj-y := misc.o prom.o
 endif
 
 obj-y += irq.o idle.o dma.o \
-	signal.o \
-	align.o bitops.o pacaData.o \
+	align.o pacaData.o \
 	udbg.o ioctl32.o \
 	rtc.o \
 	cpu_setup_power4.o \
@@ -29,22 +28,16 @@ ifneq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
 endif
 
-obj-$(CONFIG_PPC_PSERIES) += rtasd.o udbg_16550.o
-
-obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
-	bpa_iic.o spider-pic.o
+obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o
 
 obj-$(CONFIG_KEXEC) += machine_kexec.o
 obj-$(CONFIG_EEH) += eeh.o
 obj-$(CONFIG_PROC_FS) += proc_ppc64.o
-obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
-obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_MODULES) += module.o
 ifneq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_MODULES) += ppc_ksyms.o
 endif
 obj-$(CONFIG_PPC_RTAS) += rtas_pci.o
-obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
 obj-$(CONFIG_SCANLOG) += scanlog.o
 obj-$(CONFIG_LPARCFG) += lparcfg.o
 obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
@@ -57,11 +50,6 @@ obj-$(CONFIG_PPC_PMAC) += udbg_scc.o
 
 obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o
 
-ifdef CONFIG_SMP
-obj-$(CONFIG_PPC_PMAC) += smp-tbsync.o
-obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o
-endif
-
 obj-$(CONFIG_KPROBES) += kprobes.o
 
 CFLAGS_ioctl32.o += -Ifs/
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 5e6046cb414e..bce9065da6cb 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -31,7 +31,7 @@
 
 #include <asm/paca.h>
 #include <asm/lppaca.h>
-#include <asm/iSeries/HvLpEvent.h>
+#include <asm/iseries/hv_lp_event.h>
 #include <asm/rtas.h>
 #include <asm/cputable.h>
 #include <asm/cache.h>
@@ -93,6 +93,9 @@ int main(void)
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_64K_PAGES
+	DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
 	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
 	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/ppc64/kernel/bitops.c b/arch/ppc64/kernel/bitops.c
deleted file mode 100644
index ae329e8b4acb..000000000000
--- a/arch/ppc64/kernel/bitops.c
+++ /dev/null
@@ -1,147 +0,0 @@
1 | /* | ||
2 | * These are too big to be inlined. | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/bitops.h> | ||
8 | #include <asm/byteorder.h> | ||
9 | |||
10 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | ||
11 | unsigned long offset) | ||
12 | { | ||
13 | const unsigned long *p = addr + (offset >> 6); | ||
14 | unsigned long result = offset & ~63UL; | ||
15 | unsigned long tmp; | ||
16 | |||
17 | if (offset >= size) | ||
18 | return size; | ||
19 | size -= result; | ||
20 | offset &= 63UL; | ||
21 | if (offset) { | ||
22 | tmp = *(p++); | ||
23 | tmp |= ~0UL >> (64 - offset); | ||
24 | if (size < 64) | ||
25 | goto found_first; | ||
26 | if (~tmp) | ||
27 | goto found_middle; | ||
28 | size -= 64; | ||
29 | result += 64; | ||
30 | } | ||
31 | while (size & ~63UL) { | ||
32 | if (~(tmp = *(p++))) | ||
33 | goto found_middle; | ||
34 | result += 64; | ||
35 | size -= 64; | ||
36 | } | ||
37 | if (!size) | ||
38 | return result; | ||
39 | tmp = *p; | ||
40 | |||
41 | found_first: | ||
42 | tmp |= ~0UL << size; | ||
43 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
44 | return result + size; /* Nope. */ | ||
45 | found_middle: | ||
46 | return result + ffz(tmp); | ||
47 | } | ||
48 | |||
49 | EXPORT_SYMBOL(find_next_zero_bit); | ||
50 | |||
51 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | ||
52 | unsigned long offset) | ||
53 | { | ||
54 | const unsigned long *p = addr + (offset >> 6); | ||
55 | unsigned long result = offset & ~63UL; | ||
56 | unsigned long tmp; | ||
57 | |||
58 | if (offset >= size) | ||
59 | return size; | ||
60 | size -= result; | ||
61 | offset &= 63UL; | ||
62 | if (offset) { | ||
63 | tmp = *(p++); | ||
64 | tmp &= (~0UL << offset); | ||
65 | if (size < 64) | ||
66 | goto found_first; | ||
67 | if (tmp) | ||
68 | goto found_middle; | ||
69 | size -= 64; | ||
70 | result += 64; | ||
71 | } | ||
72 | while (size & ~63UL) { | ||
73 | if ((tmp = *(p++))) | ||
74 | goto found_middle; | ||
75 | result += 64; | ||
76 | size -= 64; | ||
77 | } | ||
78 | if (!size) | ||
79 | return result; | ||
80 | tmp = *p; | ||
81 | |||
82 | found_first: | ||
83 | tmp &= (~0UL >> (64 - size)); | ||
84 | if (tmp == 0UL) /* Are any bits set? */ | ||
85 | return result + size; /* Nope. */ | ||
86 | found_middle: | ||
87 | return result + __ffs(tmp); | ||
88 | } | ||
89 | |||
90 | EXPORT_SYMBOL(find_next_bit); | ||
91 | |||
92 | static inline unsigned int ext2_ilog2(unsigned int x) | ||
93 | { | ||
94 | int lz; | ||
95 | |||
96 | asm("cntlzw %0,%1": "=r"(lz):"r"(x)); | ||
97 | return 31 - lz; | ||
98 | } | ||
99 | |||
100 | static inline unsigned int ext2_ffz(unsigned int x) | ||
101 | { | ||
102 | u32 rc; | ||
103 | if ((x = ~x) == 0) | ||
104 | return 32; | ||
105 | rc = ext2_ilog2(x & -x); | ||
106 | return rc; | ||
107 | } | ||
108 | |||
109 | unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, | ||
110 | unsigned long offset) | ||
111 | { | ||
112 | const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5); | ||
113 | unsigned int result = offset & ~31; | ||
114 | unsigned int tmp; | ||
115 | |||
116 | if (offset >= size) | ||
117 | return size; | ||
118 | size -= result; | ||
119 | offset &= 31; | ||
120 | if (offset) { | ||
121 | tmp = cpu_to_le32p(p++); | ||
122 | tmp |= ~0U >> (32 - offset); /* bug or feature ? */ | ||
123 | if (size < 32) | ||
124 | goto found_first; | ||
125 | if (tmp != ~0) | ||
126 | goto found_middle; | ||
127 | size -= 32; | ||
128 | result += 32; | ||
129 | } | ||
130 | while (size >= 32) { | ||
131 | if ((tmp = cpu_to_le32p(p++)) != ~0) | ||
132 | goto found_middle; | ||
133 | result += 32; | ||
134 | size -= 32; | ||
135 | } | ||
136 | if (!size) | ||
137 | return result; | ||
138 | tmp = cpu_to_le32p(p); | ||
139 | found_first: | ||
140 | tmp |= ~0 << size; | ||
141 | if (tmp == ~0) /* Are any bits zero? */ | ||
142 | return result + size; /* Nope. */ | ||
143 | found_middle: | ||
144 | return result + ext2_ffz(tmp); | ||
145 | } | ||
146 | |||
147 | EXPORT_SYMBOL(find_next_zero_le_bit); | ||
diff --git a/arch/ppc64/kernel/bpa_iic.c b/arch/ppc64/kernel/bpa_iic.c
deleted file mode 100644
index 0aaa878e19d3..000000000000
--- a/arch/ppc64/kernel/bpa_iic.c
+++ /dev/null
@@ -1,284 +0,0 @@
1 | /* | ||
2 | * BPA Internal Interrupt Controller | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/percpu.h> | ||
27 | #include <linux/types.h> | ||
28 | |||
29 | #include <asm/io.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/prom.h> | ||
32 | #include <asm/ptrace.h> | ||
33 | |||
34 | #include "bpa_iic.h" | ||
35 | |||
36 | struct iic_pending_bits { | ||
37 | u32 data; | ||
38 | u8 flags; | ||
39 | u8 class; | ||
40 | u8 source; | ||
41 | u8 prio; | ||
42 | }; | ||
43 | |||
44 | enum iic_pending_flags { | ||
45 | IIC_VALID = 0x80, | ||
46 | IIC_IPI = 0x40, | ||
47 | }; | ||
48 | |||
49 | struct iic_regs { | ||
50 | struct iic_pending_bits pending; | ||
51 | struct iic_pending_bits pending_destr; | ||
52 | u64 generate; | ||
53 | u64 prio; | ||
54 | }; | ||
55 | |||
56 | struct iic { | ||
57 | struct iic_regs __iomem *regs; | ||
58 | }; | ||
59 | |||
60 | static DEFINE_PER_CPU(struct iic, iic); | ||
61 | |||
62 | void iic_local_enable(void) | ||
63 | { | ||
64 | out_be64(&__get_cpu_var(iic).regs->prio, 0xff); | ||
65 | } | ||
66 | |||
67 | void iic_local_disable(void) | ||
68 | { | ||
69 | out_be64(&__get_cpu_var(iic).regs->prio, 0x0); | ||
70 | } | ||
71 | |||
72 | static unsigned int iic_startup(unsigned int irq) | ||
73 | { | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static void iic_enable(unsigned int irq) | ||
78 | { | ||
79 | iic_local_enable(); | ||
80 | } | ||
81 | |||
82 | static void iic_disable(unsigned int irq) | ||
83 | { | ||
84 | } | ||
85 | |||
86 | static void iic_end(unsigned int irq) | ||
87 | { | ||
88 | iic_local_enable(); | ||
89 | } | ||
90 | |||
91 | static struct hw_interrupt_type iic_pic = { | ||
92 | .typename = " BPA-IIC ", | ||
93 | .startup = iic_startup, | ||
94 | .enable = iic_enable, | ||
95 | .disable = iic_disable, | ||
96 | .end = iic_end, | ||
97 | }; | ||
98 | |||
99 | static int iic_external_get_irq(struct iic_pending_bits pending) | ||
100 | { | ||
101 | int irq; | ||
102 | unsigned char node, unit; | ||
103 | |||
104 | node = pending.source >> 4; | ||
105 | unit = pending.source & 0xf; | ||
106 | irq = -1; | ||
107 | |||
108 | /* | ||
109 | * This mapping is specific to the Broadband | ||
110 | * Engine. We might need to get the numbers | ||
111 | * from the device tree to support future CPUs. | ||
112 | */ | ||
113 | switch (unit) { | ||
114 | case 0x00: | ||
115 | case 0x0b: | ||
116 | /* | ||
117 | * One of these units can be connected | ||
118 | * to an external interrupt controller. | ||
119 | */ | ||
120 | if (pending.prio > 0x3f || | ||
121 | pending.class != 2) | ||
122 | break; | ||
123 | irq = IIC_EXT_OFFSET | ||
124 | + spider_get_irq(pending.prio + node * IIC_NODE_STRIDE) | ||
125 | + node * IIC_NODE_STRIDE; | ||
126 | break; | ||
127 | case 0x01 ... 0x04: | ||
128 | case 0x07 ... 0x0a: | ||
129 | /* | ||
130 | * These units are connected to the SPEs | ||
131 | */ | ||
132 | if (pending.class > 2) | ||
133 | break; | ||
134 | irq = IIC_SPE_OFFSET | ||
135 | + pending.class * IIC_CLASS_STRIDE | ||
136 | + node * IIC_NODE_STRIDE | ||
137 | + unit; | ||
138 | break; | ||
139 | } | ||
140 | if (irq == -1) | ||
141 | printk(KERN_WARNING "Unexpected interrupt class %02x, " | ||
142 | "source %02x, prio %02x, cpu %02x\n", pending.class, | ||
143 | pending.source, pending.prio, smp_processor_id()); | ||
144 | return irq; | ||
145 | } | ||
146 | |||
147 | /* Get an IRQ number from the pending state register of the IIC */ | ||
148 | int iic_get_irq(struct pt_regs *regs) | ||
149 | { | ||
150 | struct iic *iic; | ||
151 | int irq; | ||
152 | struct iic_pending_bits pending; | ||
153 | |||
154 | iic = &__get_cpu_var(iic); | ||
155 | *(unsigned long *) &pending = | ||
156 | in_be64((unsigned long __iomem *) &iic->regs->pending_destr); | ||
157 | |||
158 | irq = -1; | ||
159 | if (pending.flags & IIC_VALID) { | ||
160 | if (pending.flags & IIC_IPI) { | ||
161 | irq = IIC_IPI_OFFSET + (pending.prio >> 4); | ||
162 | /* | ||
163 | if (irq > 0x80) | ||
164 | printk(KERN_WARNING "Unexpected IPI prio %02x" | ||
165 | "on CPU %02x\n", pending.prio, | ||
166 | smp_processor_id()); | ||
167 | */ | ||
168 | } else { | ||
169 | irq = iic_external_get_irq(pending); | ||
170 | } | ||
171 | } | ||
172 | return irq; | ||
173 | } | ||
174 | |||
175 | static struct iic_regs __iomem *find_iic(int cpu) | ||
176 | { | ||
177 | struct device_node *np; | ||
178 | int nodeid = cpu / 2; | ||
179 | unsigned long regs; | ||
180 | struct iic_regs __iomem *iic_regs; | ||
181 | |||
182 | for (np = of_find_node_by_type(NULL, "cpu"); | ||
183 | np; | ||
184 | np = of_find_node_by_type(np, "cpu")) { | ||
185 | if (nodeid == *(int *)get_property(np, "node-id", NULL)) | ||
186 | break; | ||
187 | } | ||
188 | |||
189 | if (!np) { | ||
190 | printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); | ||
191 | iic_regs = NULL; | ||
192 | } else { | ||
193 | regs = *(long *)get_property(np, "iic", NULL); | ||
194 | |||
195 | /* hack until we have decided on the devtree info */ | ||
196 | regs += 0x400; | ||
197 | if (cpu & 1) | ||
198 | regs += 0x20; | ||
199 | |||
200 | printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs); | ||
201 | iic_regs = __ioremap(regs, sizeof(struct iic_regs), | ||
202 | _PAGE_NO_CACHE); | ||
203 | } | ||
204 | return iic_regs; | ||
205 | } | ||
206 | |||
207 | #ifdef CONFIG_SMP | ||
208 | |||
209 | /* Use the highest interrupt priorities for IPI */ | ||
210 | static inline int iic_ipi_to_irq(int ipi) | ||
211 | { | ||
212 | return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi; | ||
213 | } | ||
214 | |||
215 | static inline int iic_irq_to_ipi(int irq) | ||
216 | { | ||
217 | return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET); | ||
218 | } | ||
219 | |||
220 | void iic_setup_cpu(void) | ||
221 | { | ||
222 | out_be64(&__get_cpu_var(iic).regs->prio, 0xff); | ||
223 | } | ||
224 | |||
225 | void iic_cause_IPI(int cpu, int mesg) | ||
226 | { | ||
227 | out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4); | ||
228 | } | ||
229 | |||
230 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | ||
231 | { | ||
232 | smp_message_recv(iic_irq_to_ipi(irq), regs); | ||
233 | return IRQ_HANDLED; | ||
234 | } | ||
235 | |||
236 | static void iic_request_ipi(int ipi, const char *name) | ||
237 | { | ||
238 | int irq; | ||
239 | |||
240 | irq = iic_ipi_to_irq(ipi); | ||
241 | /* IPIs are marked SA_INTERRUPT as they must run with irqs | ||
242 | * disabled */ | ||
243 | get_irq_desc(irq)->handler = &iic_pic; | ||
244 | get_irq_desc(irq)->status |= IRQ_PER_CPU; | ||
245 | request_irq(irq, iic_ipi_action, SA_INTERRUPT, name, NULL); | ||
246 | } | ||
247 | |||
248 | void iic_request_IPIs(void) | ||
249 | { | ||
250 | iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); | ||
251 | iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); | ||
252 | #ifdef CONFIG_DEBUGGER | ||
253 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | ||
254 | #endif /* CONFIG_DEBUGGER */ | ||
255 | } | ||
256 | #endif /* CONFIG_SMP */ | ||
257 | |||
258 | static void iic_setup_spe_handlers(void) | ||
259 | { | ||
260 | int be, isrc; | ||
261 | |||
262 | /* Assume two threads per BE are present */ | ||
263 | for (be=0; be < num_present_cpus() / 2; be++) { | ||
264 | for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { | ||
265 | int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; | ||
266 | get_irq_desc(irq)->handler = &iic_pic; | ||
267 | } | ||
268 | } | ||
269 | } | ||
270 | |||
271 | void iic_init_IRQ(void) | ||
272 | { | ||
273 | int cpu, irq_offset; | ||
274 | struct iic *iic; | ||
275 | |||
276 | irq_offset = 0; | ||
277 | for_each_cpu(cpu) { | ||
278 | iic = &per_cpu(iic, cpu); | ||
279 | iic->regs = find_iic(cpu); | ||
280 | if (iic->regs) | ||
281 | out_be64(&iic->regs->prio, 0xff); | ||
282 | } | ||
283 | iic_setup_spe_handlers(); | ||
284 | } | ||
diff --git a/arch/ppc64/kernel/bpa_iic.h b/arch/ppc64/kernel/bpa_iic.h
deleted file mode 100644
index 6833c3022166..000000000000
--- a/arch/ppc64/kernel/bpa_iic.h
+++ /dev/null
@@ -1,62 +0,0 @@
1 | #ifndef ASM_BPA_IIC_H | ||
2 | #define ASM_BPA_IIC_H | ||
3 | #ifdef __KERNEL__ | ||
4 | /* | ||
5 | * Mapping of IIC pending bits into per-node | ||
6 | * interrupt numbers. | ||
7 | * | ||
8 | * IRQ FF CC SS PP FF CC SS PP Description | ||
9 | * | ||
10 | * 00-3f 80 02 +0 00 - 80 02 +0 3f South Bridge | ||
11 | * 00-3f 80 02 +b 00 - 80 02 +b 3f South Bridge | ||
12 | * 41-4a 80 00 +1 ** - 80 00 +a ** SPU Class 0 | ||
13 | * 51-5a 80 01 +1 ** - 80 01 +a ** SPU Class 1 | ||
14 | * 61-6a 80 02 +1 ** - 80 02 +a ** SPU Class 2 | ||
15 | * 70-7f C0 ** ** 00 - C0 ** ** 0f IPI | ||
16 | * | ||
17 | * F flags | ||
18 | * C class | ||
19 | * S source | ||
20 | * P Priority | ||
21 | * + node number | ||
22 | * * don't care | ||
23 | * | ||
24 | * A node consists of a Broadband Engine and an optional | ||
25 | * south bridge device providing a maximum of 64 IRQs. | ||
26 | * The south bridge may be connected to either IOIF0 | ||
27 | * or IOIF1. | ||
28 | * Each SPE is represented as three IRQ lines, one per | ||
29 | * interrupt class. | ||
30 | * 16 IRQ numbers are reserved for inter processor | ||
31 | * interruptions, although these are only used in the | ||
32 | * range of the first node. | ||
33 | * | ||
34 | * This scheme needs 128 IRQ numbers per BIF node ID, | ||
35 | * which means that with the total of 512 lines | ||
36 | * available, we can have a maximum of four nodes. | ||
37 | */ | ||
38 | |||
39 | enum { | ||
40 | IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */ | ||
41 | IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */ | ||
42 | IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */ | ||
43 | IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */ | ||
44 | IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */ | ||
45 | IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ | ||
46 | IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */ | ||
47 | }; | ||
48 | |||
49 | extern void iic_init_IRQ(void); | ||
50 | extern int iic_get_irq(struct pt_regs *regs); | ||
51 | extern void iic_cause_IPI(int cpu, int mesg); | ||
52 | extern void iic_request_IPIs(void); | ||
53 | extern void iic_setup_cpu(void); | ||
54 | extern void iic_local_enable(void); | ||
55 | extern void iic_local_disable(void); | ||
56 | |||
57 | |||
58 | extern void spider_init_IRQ(void); | ||
59 | extern int spider_get_irq(unsigned long int_pending); | ||
60 | |||
61 | #endif | ||
62 | #endif /* ASM_BPA_IIC_H */ | ||
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
deleted file mode 100644
index da1b4b7a3269..000000000000
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ /dev/null
@@ -1,381 +0,0 @@
1 | /* | ||
2 | * IOMMU implementation for Broadband Processor Architecture | ||
3 | * We just establish a linear mapping at boot by setting all the | ||
4 | * IOPT cache entries in the CPU. | ||
5 | * The mapping functions should be identical to pci_direct_iommu, | ||
6 | * except for the handling of the high order bit that is required | ||
7 | * by the Spider bridge. These should be split into a separate | ||
8 | * file at the point where we get a different bridge chip. | ||
9 | * | ||
10 | * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH, | ||
11 | * Arnd Bergmann <arndb@de.ibm.com> | ||
12 | * | ||
13 | * Based on linear mapping | ||
14 | * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or | ||
17 | * modify it under the terms of the GNU General Public License | ||
18 | * as published by the Free Software Foundation; either version | ||
19 | * 2 of the License, or (at your option) any later version. | ||
20 | */ | ||
21 | |||
22 | #undef DEBUG | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/bootmem.h> | ||
30 | #include <linux/mm.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | |||
33 | #include <asm/sections.h> | ||
34 | #include <asm/iommu.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/prom.h> | ||
37 | #include <asm/pci-bridge.h> | ||
38 | #include <asm/machdep.h> | ||
39 | #include <asm/pmac_feature.h> | ||
40 | #include <asm/abs_addr.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/ppc-pci.h> | ||
43 | |||
44 | #include "bpa_iommu.h" | ||
45 | |||
46 | static inline unsigned long | ||
47 | get_iopt_entry(unsigned long real_address, unsigned long ioid, | ||
48 | unsigned long prot) | ||
49 | { | ||
50 | return (prot & IOPT_PROT_MASK) | ||
51 | | (IOPT_COHERENT) | ||
52 | | (IOPT_ORDER_VC) | ||
53 | | (real_address & IOPT_RPN_MASK) | ||
54 | | (ioid & IOPT_IOID_MASK); | ||
55 | } | ||
56 | |||
57 | typedef struct { | ||
58 | unsigned long val; | ||
59 | } ioste; | ||
60 | |||
61 | static inline ioste | ||
62 | mk_ioste(unsigned long val) | ||
63 | { | ||
64 | ioste ioste = { .val = val, }; | ||
65 | return ioste; | ||
66 | } | ||
67 | |||
68 | static inline ioste | ||
69 | get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size) | ||
70 | { | ||
71 | unsigned long ps; | ||
72 | unsigned long iostep; | ||
73 | unsigned long nnpt; | ||
74 | unsigned long shift; | ||
75 | |||
76 | switch (page_size) { | ||
77 | case 0x1000000: | ||
78 | ps = IOST_PS_16M; | ||
79 | nnpt = 0; /* one page per segment */ | ||
80 | shift = 5; /* segment has 16 iopt entries */ | ||
81 | break; | ||
82 | |||
83 | case 0x100000: | ||
84 | ps = IOST_PS_1M; | ||
85 | nnpt = 0; /* one page per segment */ | ||
86 | shift = 1; /* segment has 256 iopt entries */ | ||
87 | break; | ||
88 | |||
89 | case 0x10000: | ||
90 | ps = IOST_PS_64K; | ||
91 | nnpt = 0x07; /* 8 pages per io page table */ | ||
92 | shift = 0; /* all entries are used */ | ||
93 | break; | ||
94 | |||
95 | case 0x1000: | ||
96 | ps = IOST_PS_4K; | ||
97 | nnpt = 0x7f; /* 128 pages per io page table */ | ||
98 | shift = 0; /* all entries are used */ | ||
99 | break; | ||
100 | |||
101 | default: /* not a known compile time constant */ | ||
102 | { | ||
103 | /* BUILD_BUG_ON() is not usable here */ | ||
104 | extern void __get_iost_entry_bad_page_size(void); | ||
105 | __get_iost_entry_bad_page_size(); | ||
106 | } | ||
107 | break; | ||
108 | } | ||
109 | |||
110 | iostep = iopt_base + | ||
111 | /* need 8 bytes per iopte */ | ||
112 | (((io_address / page_size * 8) | ||
113 | /* align io page tables on 4k page boundaries */ | ||
114 | << shift) | ||
115 | /* nnpt+1 pages go into each iopt */ | ||
116 | & ~(nnpt << 12)); | ||
117 | |||
118 | nnpt++; /* this seems to work, but the documentation is not clear | ||
119 | about wether we put nnpt or nnpt-1 into the ioste bits. | ||
120 | In theory, this can't work for 4k pages. */ | ||
121 | return mk_ioste(IOST_VALID_MASK | ||
122 | | (iostep & IOST_PT_BASE_MASK) | ||
123 | | ((nnpt << 5) & IOST_NNPT_MASK) | ||
124 | | (ps & IOST_PS_MASK)); | ||
125 | } | ||
126 | |||
127 | /* compute the address of an io pte */ | ||
128 | static inline unsigned long | ||
129 | get_ioptep(ioste iost_entry, unsigned long io_address) | ||
130 | { | ||
131 | unsigned long iopt_base; | ||
132 | unsigned long page_size; | ||
133 | unsigned long page_number; | ||
134 | unsigned long iopt_offset; | ||
135 | |||
136 | iopt_base = iost_entry.val & IOST_PT_BASE_MASK; | ||
137 | page_size = iost_entry.val & IOST_PS_MASK; | ||
138 | |||
139 | /* decode page size to compute page number */ | ||
140 | page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size); | ||
141 | /* page number is an offset into the io page table */ | ||
142 | iopt_offset = (page_number << 3) & 0x7fff8ul; | ||
143 | return iopt_base + iopt_offset; | ||
144 | } | ||
145 | |||
146 | /* compute the tag field of the iopt cache entry */ | ||
147 | static inline unsigned long | ||
148 | get_ioc_tag(ioste iost_entry, unsigned long io_address) | ||
149 | { | ||
150 | unsigned long iopte = get_ioptep(iost_entry, io_address); | ||
151 | |||
152 | return IOPT_VALID_MASK | ||
153 | | ((iopte & 0x00000000000000ff8ul) >> 3) | ||
154 | | ((iopte & 0x0000003fffffc0000ul) >> 9); | ||
155 | } | ||
156 | |||
157 | /* compute the hashed 6 bit index for the 4-way associative pte cache */ | ||
158 | static inline unsigned long | ||
159 | get_ioc_hash(ioste iost_entry, unsigned long io_address) | ||
160 | { | ||
161 | unsigned long iopte = get_ioptep(iost_entry, io_address); | ||
162 | |||
163 | return ((iopte & 0x000000000000001f8ul) >> 3) | ||
164 | ^ ((iopte & 0x00000000000020000ul) >> 17) | ||
165 | ^ ((iopte & 0x00000000000010000ul) >> 15) | ||
166 | ^ ((iopte & 0x00000000000008000ul) >> 13) | ||
167 | ^ ((iopte & 0x00000000000004000ul) >> 11) | ||
168 | ^ ((iopte & 0x00000000000002000ul) >> 9) | ||
169 | ^ ((iopte & 0x00000000000001000ul) >> 7); | ||
170 | } | ||
171 | |||
172 | /* same as above, but pretend that we have a simpler 1-way associative | ||
173 | pte cache with an 8 bit index */ | ||
174 | static inline unsigned long | ||
175 | get_ioc_hash_1way(ioste iost_entry, unsigned long io_address) | ||
176 | { | ||
177 | unsigned long iopte = get_ioptep(iost_entry, io_address); | ||
178 | |||
179 | return ((iopte & 0x000000000000001f8ul) >> 3) | ||
180 | ^ ((iopte & 0x00000000000020000ul) >> 17) | ||
181 | ^ ((iopte & 0x00000000000010000ul) >> 15) | ||
182 | ^ ((iopte & 0x00000000000008000ul) >> 13) | ||
183 | ^ ((iopte & 0x00000000000004000ul) >> 11) | ||
184 | ^ ((iopte & 0x00000000000002000ul) >> 9) | ||
185 | ^ ((iopte & 0x00000000000001000ul) >> 7) | ||
186 | ^ ((iopte & 0x0000000000000c000ul) >> 8); | ||
187 | } | ||
188 | |||
189 | static inline ioste | ||
190 | get_iost_cache(void __iomem *base, unsigned long index) | ||
191 | { | ||
192 | unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR); | ||
193 | return mk_ioste(in_be64(&p[index])); | ||
194 | } | ||
195 | |||
196 | static inline void | ||
197 | set_iost_cache(void __iomem *base, unsigned long index, ioste ste) | ||
198 | { | ||
199 | unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR); | ||
200 | pr_debug("ioste %02lx was %016lx, store %016lx", index, | ||
201 | get_iost_cache(base, index).val, ste.val); | ||
202 | out_be64(&p[index], ste.val); | ||
203 | pr_debug(" now %016lx\n", get_iost_cache(base, index).val); | ||
204 | } | ||
205 | |||
206 | static inline unsigned long | ||
207 | get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag) | ||
208 | { | ||
209 | unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR); | ||
210 | unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG); | ||
211 | |||
212 | *tag = tags[index]; | ||
213 | rmb(); | ||
214 | return *p; | ||
215 | } | ||
216 | |||
217 | static inline void | ||
218 | set_iopt_cache(void __iomem *base, unsigned long index, | ||
219 | unsigned long tag, unsigned long val) | ||
220 | { | ||
221 | unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR; | ||
222 | unsigned long __iomem *p = base + IOC_PT_CACHE_REG; | ||
223 | pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n", | ||
224 | index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag); | ||
225 | |||
226 | out_be64(p, val); | ||
227 | out_be64(&tags[index], tag); | ||
228 | } | ||
229 | |||
230 | static inline void | ||
231 | set_iost_origin(void __iomem *base) | ||
232 | { | ||
233 | unsigned long __iomem *p = base + IOC_ST_ORIGIN; | ||
234 | unsigned long origin = IOSTO_ENABLE | IOSTO_SW; | ||
235 | |||
236 | pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin); | ||
237 | out_be64(p, origin); | ||
238 | } | ||
239 | |||
240 | static inline void | ||
241 | set_iocmd_config(void __iomem *base) | ||
242 | { | ||
243 | unsigned long __iomem *p = base + 0xc00; | ||
244 | unsigned long conf; | ||
245 | |||
246 | conf = in_be64(p); | ||
247 | pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE); | ||
248 | out_be64(p, conf | IOCMD_CONF_TE); | ||
249 | } | ||
250 | |||
251 | /* FIXME: get these from the device tree */ | ||
252 | #define ioc_base 0x20000511000ull | ||
253 | #define ioc_mmio_base 0x20000510000ull | ||
254 | #define ioid 0x48a | ||
255 | #define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */ | ||
256 | #define io_page_size 0x1000000 | ||
257 | |||
258 | static unsigned long map_iopt_entry(unsigned long address) | ||
259 | { | ||
260 | switch (address >> 20) { | ||
261 | case 0x600: | ||
262 | address = 0x24020000000ull; /* spider i/o */ | ||
263 | break; | ||
264 | default: | ||
265 | address += iopt_phys_offset; | ||
266 | break; | ||
267 | } | ||
268 | |||
269 | return get_iopt_entry(address, ioid, IOPT_PROT_RW); | ||
270 | } | ||
271 | |||
272 | static void iommu_bus_setup_null(struct pci_bus *b) { } | ||
273 | static void iommu_dev_setup_null(struct pci_dev *d) { } | ||
274 | |||
275 | /* initialize the iommu to support a simple linear mapping | ||
276 | * for each DMA window used by any device. For now, we | ||
277 | * happen to know that there is only one DMA window in use, | ||
278 | * starting at iopt_phys_offset. */ | ||
279 | static void bpa_map_iommu(void) | ||
280 | { | ||
281 | unsigned long address; | ||
282 | void __iomem *base; | ||
283 | ioste ioste; | ||
284 | unsigned long index; | ||
285 | |||
286 | base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE); | ||
287 | pr_debug("%lx mapped to %p\n", ioc_base, base); | ||
288 | set_iocmd_config(base); | ||
289 | iounmap(base); | ||
290 | |||
291 | base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE); | ||
292 | pr_debug("%lx mapped to %p\n", ioc_mmio_base, base); | ||
293 | |||
294 | set_iost_origin(base); | ||
295 | |||
296 | for (address = 0; address < 0x100000000ul; address += io_page_size) { | ||
297 | ioste = get_iost_entry(0x10000000000ul, address, io_page_size); | ||
298 | if ((address & 0xfffffff) == 0) /* segment start */ | ||
299 | set_iost_cache(base, address >> 28, ioste); | ||
300 | index = get_ioc_hash_1way(ioste, address); | ||
301 | pr_debug("addr %08lx, index %02lx, ioste %016lx\n", | ||
302 | address, index, ioste.val); | ||
303 | set_iopt_cache(base, | ||
304 | get_ioc_hash_1way(ioste, address), | ||
305 | get_ioc_tag(ioste, address), | ||
306 | map_iopt_entry(address)); | ||
307 | } | ||
308 | iounmap(base); | ||
309 | } | ||
310 | |||
311 | |||
312 | static void *bpa_alloc_coherent(struct device *hwdev, size_t size, | ||
313 | dma_addr_t *dma_handle, gfp_t flag) | ||
314 | { | ||
315 | void *ret; | ||
316 | |||
317 | ret = (void *)__get_free_pages(flag, get_order(size)); | ||
318 | if (ret != NULL) { | ||
319 | memset(ret, 0, size); | ||
320 | *dma_handle = virt_to_abs(ret) | BPA_DMA_VALID; | ||
321 | } | ||
322 | return ret; | ||
323 | } | ||
324 | |||
325 | static void bpa_free_coherent(struct device *hwdev, size_t size, | ||
326 | void *vaddr, dma_addr_t dma_handle) | ||
327 | { | ||
328 | free_pages((unsigned long)vaddr, get_order(size)); | ||
329 | } | ||
330 | |||
331 | static dma_addr_t bpa_map_single(struct device *hwdev, void *ptr, | ||
332 | size_t size, enum dma_data_direction direction) | ||
333 | { | ||
334 | return virt_to_abs(ptr) | BPA_DMA_VALID; | ||
335 | } | ||
336 | |||
337 | static void bpa_unmap_single(struct device *hwdev, dma_addr_t dma_addr, | ||
338 | size_t size, enum dma_data_direction direction) | ||
339 | { | ||
340 | } | ||
341 | |||
342 | static int bpa_map_sg(struct device *hwdev, struct scatterlist *sg, | ||
343 | int nents, enum dma_data_direction direction) | ||
344 | { | ||
345 | int i; | ||
346 | |||
347 | for (i = 0; i < nents; i++, sg++) { | ||
348 | sg->dma_address = (page_to_phys(sg->page) + sg->offset) | ||
349 | | BPA_DMA_VALID; | ||
350 | sg->dma_length = sg->length; | ||
351 | } | ||
352 | |||
353 | return nents; | ||
354 | } | ||
355 | |||
356 | static void bpa_unmap_sg(struct device *hwdev, struct scatterlist *sg, | ||
357 | int nents, enum dma_data_direction direction) | ||
358 | { | ||
359 | } | ||
360 | |||
361 | static int bpa_dma_supported(struct device *dev, u64 mask) | ||
362 | { | ||
363 | return mask < 0x100000000ull; | ||
364 | } | ||
365 | |||
366 | void bpa_init_iommu(void) | ||
367 | { | ||
368 | bpa_map_iommu(); | ||
369 | |||
370 | /* Direct I/O, IOMMU off */ | ||
371 | ppc_md.iommu_dev_setup = iommu_dev_setup_null; | ||
372 | ppc_md.iommu_bus_setup = iommu_bus_setup_null; | ||
373 | |||
374 | pci_dma_ops.alloc_coherent = bpa_alloc_coherent; | ||
375 | pci_dma_ops.free_coherent = bpa_free_coherent; | ||
376 | pci_dma_ops.map_single = bpa_map_single; | ||
377 | pci_dma_ops.unmap_single = bpa_unmap_single; | ||
378 | pci_dma_ops.map_sg = bpa_map_sg; | ||
379 | pci_dma_ops.unmap_sg = bpa_unmap_sg; | ||
380 | pci_dma_ops.dma_supported = bpa_dma_supported; | ||
381 | } | ||
diff --git a/arch/ppc64/kernel/bpa_iommu.h b/arch/ppc64/kernel/bpa_iommu.h
deleted file mode 100644
index e547d77dfa04..000000000000
--- a/arch/ppc64/kernel/bpa_iommu.h
+++ /dev/null
@@ -1,65 +0,0 @@
1 | #ifndef BPA_IOMMU_H | ||
2 | #define BPA_IOMMU_H | ||
3 | |||
4 | /* some constants */ | ||
5 | enum { | ||
6 | /* segment table entries */ | ||
7 | IOST_VALID_MASK = 0x8000000000000000ul, | ||
8 | IOST_TAG_MASK = 0x3000000000000000ul, | ||
9 | IOST_PT_BASE_MASK = 0x000003fffffff000ul, | ||
10 | IOST_NNPT_MASK = 0x0000000000000fe0ul, | ||
11 | IOST_PS_MASK = 0x000000000000000ful, | ||
12 | |||
13 | IOST_PS_4K = 0x1, | ||
14 | IOST_PS_64K = 0x3, | ||
15 | IOST_PS_1M = 0x5, | ||
16 | IOST_PS_16M = 0x7, | ||
17 | |||
18 | /* iopt tag register */ | ||
19 | IOPT_VALID_MASK = 0x0000000200000000ul, | ||
20 | IOPT_TAG_MASK = 0x00000001fffffffful, | ||
21 | |||
22 | /* iopt cache register */ | ||
23 | IOPT_PROT_MASK = 0xc000000000000000ul, | ||
24 | IOPT_PROT_NONE = 0x0000000000000000ul, | ||
25 | IOPT_PROT_READ = 0x4000000000000000ul, | ||
26 | IOPT_PROT_WRITE = 0x8000000000000000ul, | ||
27 | IOPT_PROT_RW = 0xc000000000000000ul, | ||
28 | IOPT_COHERENT = 0x2000000000000000ul, | ||
29 | |||
30 | IOPT_ORDER_MASK = 0x1800000000000000ul, | ||
31 | /* order access to same IOID/VC on same address */ | ||
32 | IOPT_ORDER_ADDR = 0x0800000000000000ul, | ||
33 | /* similar, but only after a write access */ | ||
34 | IOPT_ORDER_WRITES = 0x1000000000000000ul, | ||
35 | /* Order all accesses to same IOID/VC */ | ||
36 | IOPT_ORDER_VC = 0x1800000000000000ul, | ||
37 | |||
38 | IOPT_RPN_MASK = 0x000003fffffff000ul, | ||
39 | IOPT_HINT_MASK = 0x0000000000000800ul, | ||
40 | IOPT_IOID_MASK = 0x00000000000007fful, | ||
41 | |||
42 | IOSTO_ENABLE = 0x8000000000000000ul, | ||
43 | IOSTO_ORIGIN = 0x000003fffffff000ul, | ||
44 | IOSTO_HW = 0x0000000000000800ul, | ||
45 | IOSTO_SW = 0x0000000000000400ul, | ||
46 | |||
47 | IOCMD_CONF_TE = 0x0000800000000000ul, | ||
48 | |||
49 | /* memory mapped registers */ | ||
50 | IOC_PT_CACHE_DIR = 0x000, | ||
51 | IOC_ST_CACHE_DIR = 0x800, | ||
52 | IOC_PT_CACHE_REG = 0x910, | ||
53 | IOC_ST_ORIGIN = 0x918, | ||
54 | IOC_CONF = 0x930, | ||
55 | |||
56 | /* The high bit needs to be set on every DMA address, | ||
57 | only 2GB are addressable */ | ||
58 | BPA_DMA_VALID = 0x80000000, | ||
59 | BPA_DMA_MASK = 0x7fffffff, | ||
60 | }; | ||
61 | |||
62 | |||
63 | void bpa_init_iommu(void); | ||
64 | |||
65 | #endif | ||
diff --git a/arch/ppc64/kernel/bpa_nvram.c b/arch/ppc64/kernel/bpa_nvram.c
deleted file mode 100644
index 06a119cfceb5..000000000000
--- a/arch/ppc64/kernel/bpa_nvram.c
+++ /dev/null
@@ -1,118 +0,0 @@
1 | /* | ||
2 | * NVRAM for CPBW | ||
3 | * | ||
4 | * (C) Copyright IBM Corp. 2005 | ||
5 | * | ||
6 | * Authors : Utz Bacher <utz.bacher@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/fs.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/types.h> | ||
28 | |||
29 | #include <asm/machdep.h> | ||
30 | #include <asm/nvram.h> | ||
31 | #include <asm/prom.h> | ||
32 | |||
33 | static void __iomem *bpa_nvram_start; | ||
34 | static long bpa_nvram_len; | ||
35 | static spinlock_t bpa_nvram_lock = SPIN_LOCK_UNLOCKED; | ||
36 | |||
37 | static ssize_t bpa_nvram_read(char *buf, size_t count, loff_t *index) | ||
38 | { | ||
39 | unsigned long flags; | ||
40 | |||
41 | if (*index >= bpa_nvram_len) | ||
42 | return 0; | ||
43 | if (*index + count > bpa_nvram_len) | ||
44 | count = bpa_nvram_len - *index; | ||
45 | |||
46 | spin_lock_irqsave(&bpa_nvram_lock, flags); | ||
47 | |||
48 | memcpy_fromio(buf, bpa_nvram_start + *index, count); | ||
49 | |||
50 | spin_unlock_irqrestore(&bpa_nvram_lock, flags); | ||
51 | |||
52 | *index += count; | ||
53 | return count; | ||
54 | } | ||
55 | |||
56 | static ssize_t bpa_nvram_write(char *buf, size_t count, loff_t *index) | ||
57 | { | ||
58 | unsigned long flags; | ||
59 | |||
60 | if (*index >= bpa_nvram_len) | ||
61 | return 0; | ||
62 | if (*index + count > bpa_nvram_len) | ||
63 | count = bpa_nvram_len - *index; | ||
64 | |||
65 | spin_lock_irqsave(&bpa_nvram_lock, flags); | ||
66 | |||
67 | memcpy_toio(bpa_nvram_start + *index, buf, count); | ||
68 | |||
69 | spin_unlock_irqrestore(&bpa_nvram_lock, flags); | ||
70 | |||
71 | *index += count; | ||
72 | return count; | ||
73 | } | ||
74 | |||
75 | static ssize_t bpa_nvram_get_size(void) | ||
76 | { | ||
77 | return bpa_nvram_len; | ||
78 | } | ||
79 | |||
80 | int __init bpa_nvram_init(void) | ||
81 | { | ||
82 | struct device_node *nvram_node; | ||
83 | unsigned long *buffer; | ||
84 | int proplen; | ||
85 | unsigned long nvram_addr; | ||
86 | int ret; | ||
87 | |||
88 | ret = -ENODEV; | ||
89 | nvram_node = of_find_node_by_type(NULL, "nvram"); | ||
90 | if (!nvram_node) | ||
91 | goto out; | ||
92 | |||
93 | ret = -EIO; | ||
94 | buffer = (unsigned long *)get_property(nvram_node, "reg", &proplen); | ||
95 | if (proplen != 2*sizeof(unsigned long)) | ||
96 | goto out; | ||
97 | |||
98 | ret = -ENODEV; | ||
99 | nvram_addr = buffer[0]; | ||
100 | bpa_nvram_len = buffer[1]; | ||
101 | if ( (!bpa_nvram_len) || (!nvram_addr) ) | ||
102 | goto out; | ||
103 | |||
104 | bpa_nvram_start = ioremap(nvram_addr, bpa_nvram_len); | ||
105 | if (!bpa_nvram_start) | ||
106 | goto out; | ||
107 | |||
108 | printk(KERN_INFO "BPA NVRAM, %luk mapped to %p\n", | ||
109 | bpa_nvram_len >> 10, bpa_nvram_start); | ||
110 | |||
111 | ppc_md.nvram_read = bpa_nvram_read; | ||
112 | ppc_md.nvram_write = bpa_nvram_write; | ||
113 | ppc_md.nvram_size = bpa_nvram_get_size; | ||
114 | |||
115 | out: | ||
116 | of_node_put(nvram_node); | ||
117 | return ret; | ||
118 | } | ||
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
deleted file mode 100644
index c2dc8f282eb8..000000000000
--- a/arch/ppc64/kernel/bpa_setup.c
+++ /dev/null
@@ -1,141 +0,0 @@
1 | /* | ||
2 | * linux/arch/ppc/kernel/bpa_setup.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * Adapted from 'alpha' version by Gary Thomas | ||
6 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
7 | * Modified by PPC64 Team, IBM Corp | ||
8 | * Modified by BPA Team, IBM Deutschland Entwicklung GmbH | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | #undef DEBUG | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/unistd.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/user.h> | ||
25 | #include <linux/reboot.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/irq.h> | ||
29 | #include <linux/seq_file.h> | ||
30 | #include <linux/root_dev.h> | ||
31 | #include <linux/console.h> | ||
32 | |||
33 | #include <asm/mmu.h> | ||
34 | #include <asm/processor.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/prom.h> | ||
38 | #include <asm/rtas.h> | ||
39 | #include <asm/pci-bridge.h> | ||
40 | #include <asm/iommu.h> | ||
41 | #include <asm/dma.h> | ||
42 | #include <asm/machdep.h> | ||
43 | #include <asm/time.h> | ||
44 | #include <asm/nvram.h> | ||
45 | #include <asm/cputable.h> | ||
46 | #include <asm/ppc-pci.h> | ||
47 | #include <asm/irq.h> | ||
48 | |||
49 | #include "bpa_iic.h" | ||
50 | #include "bpa_iommu.h" | ||
51 | |||
52 | #ifdef DEBUG | ||
53 | #define DBG(fmt...) udbg_printf(fmt) | ||
54 | #else | ||
55 | #define DBG(fmt...) | ||
56 | #endif | ||
57 | |||
58 | void bpa_show_cpuinfo(struct seq_file *m) | ||
59 | { | ||
60 | struct device_node *root; | ||
61 | const char *model = ""; | ||
62 | |||
63 | root = of_find_node_by_path("/"); | ||
64 | if (root) | ||
65 | model = get_property(root, "model", NULL); | ||
66 | seq_printf(m, "machine\t\t: BPA %s\n", model); | ||
67 | of_node_put(root); | ||
68 | } | ||
69 | |||
70 | static void bpa_progress(char *s, unsigned short hex) | ||
71 | { | ||
72 | printk("*** %04x : %s\n", hex, s ? s : ""); | ||
73 | } | ||
74 | |||
75 | static void __init bpa_setup_arch(void) | ||
76 | { | ||
77 | ppc_md.init_IRQ = iic_init_IRQ; | ||
78 | ppc_md.get_irq = iic_get_irq; | ||
79 | |||
80 | #ifdef CONFIG_SMP | ||
81 | smp_init_pSeries(); | ||
82 | #endif | ||
83 | |||
84 | /* init to some ~sane value until calibrate_delay() runs */ | ||
85 | loops_per_jiffy = 50000000; | ||
86 | |||
87 | if (ROOT_DEV == 0) { | ||
88 | printk("No ramdisk, default root is /dev/hda2\n"); | ||
89 | ROOT_DEV = Root_HDA2; | ||
90 | } | ||
91 | |||
92 | /* Find and initialize PCI host bridges */ | ||
93 | init_pci_config_tokens(); | ||
94 | find_and_init_phbs(); | ||
95 | spider_init_IRQ(); | ||
96 | #ifdef CONFIG_DUMMY_CONSOLE | ||
97 | conswitchp = &dummy_con; | ||
98 | #endif | ||
99 | |||
100 | bpa_nvram_init(); | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Early initialization. Relocation is on but do not reference unbolted pages | ||
105 | */ | ||
106 | static void __init bpa_init_early(void) | ||
107 | { | ||
108 | DBG(" -> bpa_init_early()\n"); | ||
109 | |||
110 | hpte_init_native(); | ||
111 | |||
112 | bpa_init_iommu(); | ||
113 | |||
114 | ppc64_interrupt_controller = IC_BPA_IIC; | ||
115 | |||
116 | DBG(" <- bpa_init_early()\n"); | ||
117 | } | ||
118 | |||
119 | |||
120 | static int __init bpa_probe(int platform) | ||
121 | { | ||
122 | if (platform != PLATFORM_BPA) | ||
123 | return 0; | ||
124 | |||
125 | return 1; | ||
126 | } | ||
127 | |||
128 | struct machdep_calls __initdata bpa_md = { | ||
129 | .probe = bpa_probe, | ||
130 | .setup_arch = bpa_setup_arch, | ||
131 | .init_early = bpa_init_early, | ||
132 | .show_cpuinfo = bpa_show_cpuinfo, | ||
133 | .restart = rtas_restart, | ||
134 | .power_off = rtas_power_off, | ||
135 | .halt = rtas_halt, | ||
136 | .get_boot_time = rtas_get_boot_time, | ||
137 | .get_rtc_time = rtas_get_rtc_time, | ||
138 | .set_rtc_time = rtas_set_rtc_time, | ||
139 | .calibrate_decr = generic_calibrate_decr, | ||
140 | .progress = bpa_progress, | ||
141 | }; | ||
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 929f9f42cf7a..9e8050ea1225 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -35,7 +35,7 @@
 #include <asm/cputable.h>
 #include <asm/setup.h>
 #include <asm/hvcall.h>
-#include <asm/iSeries/LparMap.h>
+#include <asm/iseries/lpar_map.h>
 #include <asm/thread_info.h>
 
 #ifdef CONFIG_PPC_ISERIES
@@ -195,11 +195,11 @@ exception_marker:
 #define EX_R12 24
 #define EX_R13 32
 #define EX_SRR0 40
-#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
 #define EX_DAR 48
-#define EX_LR 48 /* SLB miss saves LR, but not DAR */
 #define EX_DSISR 56
 #define EX_CCR 60
+#define EX_R3 64
+#define EX_LR 72
 
 #define EXCEPTION_PROLOG_PSERIES(area, label) \
 	mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
@@ -419,17 +419,22 @@ data_access_slb_pSeries:
 	mtspr SPRN_SPRG1,r13
 	RUNLATCH_ON(r13)
 	mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+	std r3,PACA_EXSLB+EX_R3(r13)
+	mfspr r3,SPRN_DAR
 	std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+	mfcr r9
+#ifdef __DISABLED__
+	/* Keep that around for when we re-implement dynamic VSIDs */
+	cmpdi r3,0
+	bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
 	std r10,PACA_EXSLB+EX_R10(r13)
 	std r11,PACA_EXSLB+EX_R11(r13)
 	std r12,PACA_EXSLB+EX_R12(r13)
-	std r3,PACA_EXSLB+EX_R3(r13)
-	mfspr r9,SPRN_SPRG1
-	std r9,PACA_EXSLB+EX_R13(r13)
-	mfcr r9
+	mfspr r10,SPRN_SPRG1
+	std r10,PACA_EXSLB+EX_R13(r13)
 	mfspr r12,SPRN_SRR1 /* and SRR1 */
-	mfspr r3,SPRN_DAR
-	b .do_slb_miss /* Rel. branch works in real mode */
+	b .slb_miss_realmode /* Rel. branch works in real mode */
 
 	STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
@@ -440,17 +445,22 @@ instruction_access_slb_pSeries:
 	mtspr SPRN_SPRG1,r13
 	RUNLATCH_ON(r13)
 	mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+	std r3,PACA_EXSLB+EX_R3(r13)
+	mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
 	std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+	mfcr r9
+#ifdef __DISABLED__
+	/* Keep that around for when we re-implement dynamic VSIDs */
+	cmpdi r3,0
+	bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
 	std r10,PACA_EXSLB+EX_R10(r13)
 	std r11,PACA_EXSLB+EX_R11(r13)
 	std r12,PACA_EXSLB+EX_R12(r13)
-	std r3,PACA_EXSLB+EX_R3(r13)
-	mfspr r9,SPRN_SPRG1
-	std r9,PACA_EXSLB+EX_R13(r13)
-	mfcr r9
+	mfspr r10,SPRN_SPRG1
+	std r10,PACA_EXSLB+EX_R13(r13)
 	mfspr r12,SPRN_SRR1 /* and SRR1 */
-	mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
-	b .do_slb_miss /* Rel. branch works in real mode */
+	b .slb_miss_realmode /* Rel. branch works in real mode */
 
 	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -509,6 +519,38 @@ _GLOBAL(do_stab_bolted_pSeries)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
 
 /*
+ * We have some room here  we use that to put
+ * the peries slb miss user trampoline code so it's reasonably
+ * away from slb_miss_user_common to avoid problems with rfid
+ *
+ * This is used for when the SLB miss handler has to go virtual,
+ * which doesn't happen for now anymore but will once we re-implement
+ * dynamic VSIDs for shared page tables
+ */
+#ifdef __DISABLED__
+slb_miss_user_pseries:
+	std r10,PACA_EXGEN+EX_R10(r13)
+	std r11,PACA_EXGEN+EX_R11(r13)
+	std r12,PACA_EXGEN+EX_R12(r13)
+	mfspr r10,SPRG1
+	ld r11,PACA_EXSLB+EX_R9(r13)
+	ld r12,PACA_EXSLB+EX_R3(r13)
+	std r10,PACA_EXGEN+EX_R13(r13)
+	std r11,PACA_EXGEN+EX_R9(r13)
+	std r12,PACA_EXGEN+EX_R3(r13)
+	clrrdi r12,r13,32
+	mfmsr r10
+	mfspr r11,SRR0 /* save SRR0 */
+	ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
+	ori r10,r10,MSR_IR|MSR_DR|MSR_RI
+	mtspr SRR0,r12
+	mfspr r12,SRR1 /* and SRR1 */
+	mtspr SRR1,r10
+	rfid
+	b . /* prevent spec. execution */
+#endif /* __DISABLED__ */
+
+/*
  * Vectors for the FWNMI option. Share common code.
  */
 	.globl system_reset_fwnmi
@@ -559,22 +601,59 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	.globl data_access_slb_iSeries
 data_access_slb_iSeries:
 	mtspr SPRN_SPRG1,r13 /* save r13 */
-	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
 	std r3,PACA_EXSLB+EX_R3(r13)
-	ld r12,PACALPPACA+LPPACASRR1(r13)
 	mfspr r3,SPRN_DAR
-	b .do_slb_miss
+	std r9,PACA_EXSLB+EX_R9(r13)
+	mfcr r9
+#ifdef __DISABLED__
+	cmpdi r3,0
+	bge slb_miss_user_iseries
+#endif
+	std r10,PACA_EXSLB+EX_R10(r13)
+	std r11,PACA_EXSLB+EX_R11(r13)
+	std r12,PACA_EXSLB+EX_R12(r13)
+	mfspr r10,SPRN_SPRG1
+	std r10,PACA_EXSLB+EX_R13(r13)
+	ld r12,PACALPPACA+LPPACASRR1(r13);
+	b .slb_miss_realmode
 
 	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
 
 	.globl instruction_access_slb_iSeries
 instruction_access_slb_iSeries:
 	mtspr SPRN_SPRG1,r13 /* save r13 */
-	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+	mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
 	std r3,PACA_EXSLB+EX_R3(r13)
-	ld r12,PACALPPACA+LPPACASRR1(r13)
-	ld r3,PACALPPACA+LPPACASRR0(r13)
-	b .do_slb_miss
+	ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
+	std r9,PACA_EXSLB+EX_R9(r13)
+	mfcr r9
+#ifdef __DISABLED__
+	cmpdi r3,0
+	bge .slb_miss_user_iseries
+#endif
+	std r10,PACA_EXSLB+EX_R10(r13)
+	std r11,PACA_EXSLB+EX_R11(r13)
+	std r12,PACA_EXSLB+EX_R12(r13)
+	mfspr r10,SPRN_SPRG1
+	std r10,PACA_EXSLB+EX_R13(r13)
+	ld r12,PACALPPACA+LPPACASRR1(r13);
+	b .slb_miss_realmode
+
+#ifdef __DISABLED__
+slb_miss_user_iseries:
+	std r10,PACA_EXGEN+EX_R10(r13)
+	std r11,PACA_EXGEN+EX_R11(r13)
+	std r12,PACA_EXGEN+EX_R12(r13)
+	mfspr r10,SPRG1
+	ld r11,PACA_EXSLB+EX_R9(r13)
+	ld r12,PACA_EXSLB+EX_R3(r13)
+	std r10,PACA_EXGEN+EX_R13(r13)
+	std r11,PACA_EXGEN+EX_R9(r13)
+	std r12,PACA_EXGEN+EX_R3(r13)
+	EXCEPTION_PROLOG_ISERIES_2
+	b slb_miss_user_common
+#endif
 
 	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
@@ -809,6 +888,126 @@ instruction_access_common: | |||
809 | li r5,0x400 | 888 | li r5,0x400 |
810 | b .do_hash_page /* Try to handle as hpte fault */ | 889 | b .do_hash_page /* Try to handle as hpte fault */ |
811 | 890 | ||
891 | /* | ||
892 | * Here is the common SLB miss user that is used when going to virtual | ||
893 | * mode for SLB misses, that is currently not used | ||
894 | */ | ||
895 | #ifdef __DISABLED__ | ||
896 | .align 7 | ||
897 | .globl slb_miss_user_common | ||
898 | slb_miss_user_common: | ||
899 | mflr r10 | ||
900 | std r3,PACA_EXGEN+EX_DAR(r13) | ||
901 | stw r9,PACA_EXGEN+EX_CCR(r13) | ||
902 | std r10,PACA_EXGEN+EX_LR(r13) | ||
903 | std r11,PACA_EXGEN+EX_SRR0(r13) | ||
904 | bl .slb_allocate_user | ||
905 | |||
906 | ld r10,PACA_EXGEN+EX_LR(r13) | ||
907 | ld r3,PACA_EXGEN+EX_R3(r13) | ||
908 | lwz r9,PACA_EXGEN+EX_CCR(r13) | ||
909 | ld r11,PACA_EXGEN+EX_SRR0(r13) | ||
910 | mtlr r10 | ||
911 | beq- slb_miss_fault | ||
912 | |||
913 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
914 | beq- unrecov_user_slb | ||
915 | mfmsr r10 | ||
916 | |||
917 | .machine push | ||
918 | .machine "power4" | ||
919 | mtcrf 0x80,r9 | ||
920 | .machine pop | ||
921 | |||
922 | clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ | ||
923 | mtmsrd r10,1 | ||
924 | |||
925 | mtspr SRR0,r11 | ||
926 | mtspr SRR1,r12 | ||
927 | |||
928 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
929 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
930 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
931 | ld r12,PACA_EXGEN+EX_R12(r13) | ||
932 | ld r13,PACA_EXGEN+EX_R13(r13) | ||
933 | rfid | ||
934 | b . | ||
935 | |||
936 | slb_miss_fault: | ||
937 | EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) | ||
938 | ld r4,PACA_EXGEN+EX_DAR(r13) | ||
939 | li r5,0 | ||
940 | std r4,_DAR(r1) | ||
941 | std r5,_DSISR(r1) | ||
942 | b .handle_page_fault | ||
943 | |||
944 | unrecov_user_slb: | ||
945 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) | ||
946 | DISABLE_INTS | ||
947 | bl .save_nvgprs | ||
948 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
949 | bl .unrecoverable_exception | ||
950 | b 1b | ||
951 | |||
952 | #endif /* __DISABLED__ */ | ||
953 | |||
954 | |||
955 | /* | ||
956 | * r13 points to the PACA, r9 contains the saved CR, | ||
957 | * r12 contains the saved SRR1, SRR0 is still ready for return | ||
958 | * r3 has the faulting address | ||
959 | * r9 - r13 are saved in paca->exslb. | ||
960 | * r3 is saved in paca->slb_r3 | ||
961 | * We assume we aren't going to take any exceptions during this procedure. | ||
962 | */ | ||
963 | _GLOBAL(slb_miss_realmode) | ||
964 | mflr r10 | ||
965 | |||
966 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
967 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
968 | |||
969 | bl .slb_allocate_realmode | ||
970 | |||
971 | /* All done -- return from exception. */ | ||
972 | |||
973 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
974 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
975 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
976 | #ifdef CONFIG_PPC_ISERIES | ||
977 | ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ | ||
978 | #endif /* CONFIG_PPC_ISERIES */ | ||
979 | |||
980 | mtlr r10 | ||
981 | |||
982 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
983 | beq- unrecov_slb | ||
984 | |||
985 | .machine push | ||
986 | .machine "power4" | ||
987 | mtcrf 0x80,r9 | ||
988 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
989 | .machine pop | ||
990 | |||
991 | #ifdef CONFIG_PPC_ISERIES | ||
992 | mtspr SPRN_SRR0,r11 | ||
993 | mtspr SPRN_SRR1,r12 | ||
994 | #endif /* CONFIG_PPC_ISERIES */ | ||
995 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
996 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
997 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
998 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
999 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1000 | rfid | ||
1001 | b . /* prevent speculative execution */ | ||
1002 | |||
1003 | unrecov_slb: | ||
1004 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
1005 | DISABLE_INTS | ||
1006 | bl .save_nvgprs | ||
1007 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
1008 | bl .unrecoverable_exception | ||
1009 | b 1b | ||
1010 | |||
812 | .align 7 | 1011 | .align 7 |
813 | .globl hardware_interrupt_common | 1012 | .globl hardware_interrupt_common |
814 | .globl hardware_interrupt_entry | 1013 | .globl hardware_interrupt_entry |
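The new .slb_miss_realmode above keeps the shape of the old do_slb_miss (save LR and CR in the EXSLB frame, call the allocator, restore, rfid) but calls .slb_allocate_realmode and, on iSeries, reloads SRR0/SRR1 from the lppaca before returning. The andi./beq- pair on MSR_RI is the recoverability gate: if the interrupted context's SRR1 had the recoverable-interrupt bit clear, SRR0/SRR1 cannot be trusted and the handler diverts to unrecov_slb instead of attempting an rfid. A minimal C rendering of that gate, with names used only for illustration ('srr1' stands for the value the prologue left in r12, 'regs' for the frame built on the unrecoverable path):

	if (!(srr1 & MSR_RI))			/* recoverable-interrupt bit clear? */
		unrecoverable_exception(regs);	/* reported via unrecov_slb; no return */
	/* otherwise restore CR, SRR0/SRR1 and r9-r13, then rfid */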
@@ -1139,62 +1338,6 @@ _GLOBAL(do_stab_bolted) | |||
1139 | b . /* prevent speculative execution */ | 1338 | b . /* prevent speculative execution */ |
1140 | 1339 | ||
1141 | /* | 1340 | /* |
1142 | * r13 points to the PACA, r9 contains the saved CR, | ||
1143 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
1144 | * r3 has the faulting address | ||
1145 | * r9 - r13 are saved in paca->exslb. | ||
1146 | * r3 is saved in paca->slb_r3 | ||
1147 | * We assume we aren't going to take any exceptions during this procedure. | ||
1148 | */ | ||
1149 | _GLOBAL(do_slb_miss) | ||
1150 | mflr r10 | ||
1151 | |||
1152 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
1153 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
1154 | |||
1155 | bl .slb_allocate /* handle it */ | ||
1156 | |||
1157 | /* All done -- return from exception. */ | ||
1158 | |||
1159 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
1160 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
1161 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1162 | #ifdef CONFIG_PPC_ISERIES | ||
1163 | ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ | ||
1164 | #endif /* CONFIG_PPC_ISERIES */ | ||
1165 | |||
1166 | mtlr r10 | ||
1167 | |||
1168 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
1169 | beq- unrecov_slb | ||
1170 | |||
1171 | .machine push | ||
1172 | .machine "power4" | ||
1173 | mtcrf 0x80,r9 | ||
1174 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
1175 | .machine pop | ||
1176 | |||
1177 | #ifdef CONFIG_PPC_ISERIES | ||
1178 | mtspr SPRN_SRR0,r11 | ||
1179 | mtspr SPRN_SRR1,r12 | ||
1180 | #endif /* CONFIG_PPC_ISERIES */ | ||
1181 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1182 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1183 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1184 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1185 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1186 | rfid | ||
1187 | b . /* prevent speculative execution */ | ||
1188 | |||
1189 | unrecov_slb: | ||
1190 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
1191 | DISABLE_INTS | ||
1192 | bl .save_nvgprs | ||
1193 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
1194 | bl .unrecoverable_exception | ||
1195 | b 1b | ||
1196 | |||
1197 | /* | ||
1198 | * Space for CPU0's segment table. | 1341 | * Space for CPU0's segment table. |
1199 | * | 1342 | * |
1200 | * On iSeries, the hypervisor must fill in at least one entry before | 1343 | * On iSeries, the hypervisor must fill in at least one entry before |
@@ -1569,7 +1712,10 @@ _GLOBAL(__secondary_start) | |||
1569 | #endif | 1712 | #endif |
1570 | /* Initialize the first segment table (or SLB) entry */ | 1713 | /* Initialize the first segment table (or SLB) entry */ |
1571 | ld r3,PACASTABVIRT(r13) /* get addr of segment table */ | 1714 | ld r3,PACASTABVIRT(r13) /* get addr of segment table */ |
1715 | BEGIN_FTR_SECTION | ||
1572 | bl .stab_initialize | 1716 | bl .stab_initialize |
1717 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
1718 | bl .slb_initialize | ||
1573 | 1719 | ||
1574 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | 1720 | /* Initialize the kernel stack. Just a repeat for iSeries. */ |
1575 | LOADADDR(r3,current_set) | 1721 | LOADADDR(r3,current_set) |
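In the __secondary_start hunk above, the call to .stab_initialize is now bracketed by BEGIN_FTR_SECTION / END_FTR_SECTION_IFCLR(CPU_FTR_SLB), i.e. it is patched to nops at boot on CPUs whose feature mask has CPU_FTR_SLB set, while the added bl .slb_initialize sits outside the section. Roughly, in C terms (cur_cpu_spec->cpu_features is the same mask the cputable code in this patch manipulates; the argument name is illustrative):

	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB))
		stab_initialize(stab_virt);	/* segment-table CPUs only */
	slb_initialize();			/* outside the feature section */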
@@ -1914,24 +2060,6 @@ _GLOBAL(hmt_start_secondary) | |||
1914 | blr | 2060 | blr |
1915 | #endif | 2061 | #endif |
1916 | 2062 | ||
1917 | #if defined(CONFIG_KEXEC) || defined(CONFIG_SMP) | ||
1918 | _GLOBAL(smp_release_cpus) | ||
1919 | /* All secondary cpus are spinning on a common | ||
1920 | * spinloop, release them all now so they can start | ||
1921 | * to spin on their individual paca spinloops. | ||
1922 | * For non SMP kernels, the secondary cpus never | ||
1923 | * get out of the common spinloop. | ||
1924 | * XXX This does nothing useful on iSeries, secondaries are | ||
1925 | * already waiting on their paca. | ||
1926 | */ | ||
1927 | li r3,1 | ||
1928 | LOADADDR(r5,__secondary_hold_spinloop) | ||
1929 | std r3,0(r5) | ||
1930 | sync | ||
1931 | blr | ||
1932 | #endif /* CONFIG_SMP */ | ||
1933 | |||
1934 | |||
1935 | /* | 2063 | /* |
1936 | * We put a few things here that have to be page-aligned. | 2064 | * We put a few things here that have to be page-aligned. |
1937 | * This stuff goes at the beginning of the bss, which is page-aligned. | 2065 | * This stuff goes at the beginning of the bss, which is page-aligned. |
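head.S also loses its assembly smp_release_cpus() (the block removed after hmt_start_secondary above); the machine_kexec.c hunk further down drops its local extern for the same function, so the symbol is presumably provided from C and declared in a shared header after this change. Written only from the removed instructions (li r3,1; std into __secondary_hold_spinloop; sync), a C equivalent of the removed body would be:

	extern unsigned long __secondary_hold_spinloop;

	void smp_release_cpus(void)
	{
		__secondary_hold_spinloop = 1;	/* li r3,1 ; std r3,0(r5) */
		mb();				/* the removed 'sync'      */
	}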
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c index f41afe545045..87474584033f 100644 --- a/arch/ppc64/kernel/irq.c +++ b/arch/ppc64/kernel/irq.c | |||
@@ -52,7 +52,7 @@ | |||
52 | #include <asm/cache.h> | 52 | #include <asm/cache.h> |
53 | #include <asm/prom.h> | 53 | #include <asm/prom.h> |
54 | #include <asm/ptrace.h> | 54 | #include <asm/ptrace.h> |
55 | #include <asm/iSeries/ItLpQueue.h> | 55 | #include <asm/iseries/it_lp_queue.h> |
56 | #include <asm/machdep.h> | 56 | #include <asm/machdep.h> |
57 | #include <asm/paca.h> | 57 | #include <asm/paca.h> |
58 | 58 | ||
@@ -392,7 +392,7 @@ int virt_irq_create_mapping(unsigned int real_irq) | |||
392 | if (ppc64_interrupt_controller == IC_OPEN_PIC) | 392 | if (ppc64_interrupt_controller == IC_OPEN_PIC) |
393 | return real_irq; /* no mapping for openpic (for now) */ | 393 | return real_irq; /* no mapping for openpic (for now) */ |
394 | 394 | ||
395 | if (ppc64_interrupt_controller == IC_BPA_IIC) | 395 | if (ppc64_interrupt_controller == IC_CELL_PIC) |
396 | return real_irq; /* no mapping for iic either */ | 396 | return real_irq; /* no mapping for iic either */ |
397 | 397 | ||
398 | /* don't map interrupts < MIN_VIRT_IRQ */ | 398 | /* don't map interrupts < MIN_VIRT_IRQ */ |
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c index ed876a5178ae..511af54e6230 100644 --- a/arch/ppc64/kernel/kprobes.c +++ b/arch/ppc64/kernel/kprobes.c | |||
@@ -30,19 +30,14 @@ | |||
30 | #include <linux/config.h> | 30 | #include <linux/config.h> |
31 | #include <linux/kprobes.h> | 31 | #include <linux/kprobes.h> |
32 | #include <linux/ptrace.h> | 32 | #include <linux/ptrace.h> |
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/preempt.h> | 33 | #include <linux/preempt.h> |
35 | #include <asm/cacheflush.h> | 34 | #include <asm/cacheflush.h> |
36 | #include <asm/kdebug.h> | 35 | #include <asm/kdebug.h> |
37 | #include <asm/sstep.h> | 36 | #include <asm/sstep.h> |
38 | 37 | ||
39 | static DECLARE_MUTEX(kprobe_mutex); | 38 | static DECLARE_MUTEX(kprobe_mutex); |
40 | 39 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | |
41 | static struct kprobe *current_kprobe; | 40 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
42 | static unsigned long kprobe_status, kprobe_saved_msr; | ||
43 | static struct kprobe *kprobe_prev; | ||
44 | static unsigned long kprobe_status_prev, kprobe_saved_msr_prev; | ||
45 | static struct pt_regs jprobe_saved_regs; | ||
46 | 41 | ||
47 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 42 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
48 | { | 43 | { |
@@ -108,20 +103,28 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
108 | regs->nip = (unsigned long)p->ainsn.insn; | 103 | regs->nip = (unsigned long)p->ainsn.insn; |
109 | } | 104 | } |
110 | 105 | ||
111 | static inline void save_previous_kprobe(void) | 106 | static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) |
107 | { | ||
108 | kcb->prev_kprobe.kp = kprobe_running(); | ||
109 | kcb->prev_kprobe.status = kcb->kprobe_status; | ||
110 | kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr; | ||
111 | } | ||
112 | |||
113 | static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb) | ||
112 | { | 114 | { |
113 | kprobe_prev = current_kprobe; | 115 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; |
114 | kprobe_status_prev = kprobe_status; | 116 | kcb->kprobe_status = kcb->prev_kprobe.status; |
115 | kprobe_saved_msr_prev = kprobe_saved_msr; | 117 | kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; |
116 | } | 118 | } |
117 | 119 | ||
118 | static inline void restore_previous_kprobe(void) | 120 | static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
121 | struct kprobe_ctlblk *kcb) | ||
119 | { | 122 | { |
120 | current_kprobe = kprobe_prev; | 123 | __get_cpu_var(current_kprobe) = p; |
121 | kprobe_status = kprobe_status_prev; | 124 | kcb->kprobe_saved_msr = regs->msr; |
122 | kprobe_saved_msr = kprobe_saved_msr_prev; | ||
123 | } | 125 | } |
124 | 126 | ||
127 | /* Called with kretprobe_lock held */ | ||
125 | void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, | 128 | void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, |
126 | struct pt_regs *regs) | 129 | struct pt_regs *regs) |
127 | { | 130 | { |
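The kprobes changes in this file replace the old file-scope state (current_kprobe, kprobe_status, kprobe_saved_msr and their *_prev copies, plus jprobe_saved_regs) with the per-CPU current_kprobe pointer and a per-CPU struct kprobe_ctlblk fetched via get_kprobe_ctlblk(). Judging only by the accesses in these hunks, the control block must carry at least the fields below; this is a sketch for orientation, not the authoritative definition, which lives in the arch kprobes header.

	struct prev_kprobe {			/* one level of re-entry state */
		struct kprobe *kp;
		unsigned long status;
		unsigned long saved_msr;
	};

	struct kprobe_ctlblk {
		unsigned long kprobe_status;
		unsigned long kprobe_saved_msr;
		struct pt_regs jprobe_saved_regs;	/* used by the jprobe hooks */
		struct prev_kprobe prev_kprobe;
	};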
@@ -145,19 +148,24 @@ static inline int kprobe_handler(struct pt_regs *regs) | |||
145 | struct kprobe *p; | 148 | struct kprobe *p; |
146 | int ret = 0; | 149 | int ret = 0; |
147 | unsigned int *addr = (unsigned int *)regs->nip; | 150 | unsigned int *addr = (unsigned int *)regs->nip; |
151 | struct kprobe_ctlblk *kcb; | ||
152 | |||
153 | /* | ||
154 | * We don't want to be preempted for the entire | ||
155 | * duration of kprobe processing | ||
156 | */ | ||
157 | preempt_disable(); | ||
158 | kcb = get_kprobe_ctlblk(); | ||
148 | 159 | ||
149 | /* Check we're not actually recursing */ | 160 | /* Check we're not actually recursing */ |
150 | if (kprobe_running()) { | 161 | if (kprobe_running()) { |
151 | /* We *are* holding lock here, so this is safe. | ||
152 | Disarm the probe we just hit, and ignore it. */ | ||
153 | p = get_kprobe(addr); | 162 | p = get_kprobe(addr); |
154 | if (p) { | 163 | if (p) { |
155 | kprobe_opcode_t insn = *p->ainsn.insn; | 164 | kprobe_opcode_t insn = *p->ainsn.insn; |
156 | if (kprobe_status == KPROBE_HIT_SS && | 165 | if (kcb->kprobe_status == KPROBE_HIT_SS && |
157 | is_trap(insn)) { | 166 | is_trap(insn)) { |
158 | regs->msr &= ~MSR_SE; | 167 | regs->msr &= ~MSR_SE; |
159 | regs->msr |= kprobe_saved_msr; | 168 | regs->msr |= kcb->kprobe_saved_msr; |
160 | unlock_kprobes(); | ||
161 | goto no_kprobe; | 169 | goto no_kprobe; |
162 | } | 170 | } |
163 | /* We have reentered the kprobe_handler(), since | 171 | /* We have reentered the kprobe_handler(), since |
@@ -166,27 +174,24 @@ static inline int kprobe_handler(struct pt_regs *regs) | |||
166 | * just single step on the instruction of the new probe | 174 | * just single step on the instruction of the new probe |
167 | * without calling any user handlers. | 175 | * without calling any user handlers. |
168 | */ | 176 | */ |
169 | save_previous_kprobe(); | 177 | save_previous_kprobe(kcb); |
170 | current_kprobe = p; | 178 | set_current_kprobe(p, regs, kcb); |
171 | kprobe_saved_msr = regs->msr; | 179 | kcb->kprobe_saved_msr = regs->msr; |
172 | p->nmissed++; | 180 | p->nmissed++; |
173 | prepare_singlestep(p, regs); | 181 | prepare_singlestep(p, regs); |
174 | kprobe_status = KPROBE_REENTER; | 182 | kcb->kprobe_status = KPROBE_REENTER; |
175 | return 1; | 183 | return 1; |
176 | } else { | 184 | } else { |
177 | p = current_kprobe; | 185 | p = __get_cpu_var(current_kprobe); |
178 | if (p->break_handler && p->break_handler(p, regs)) { | 186 | if (p->break_handler && p->break_handler(p, regs)) { |
179 | goto ss_probe; | 187 | goto ss_probe; |
180 | } | 188 | } |
181 | } | 189 | } |
182 | /* If it's not ours, can't be delete race, (we hold lock). */ | ||
183 | goto no_kprobe; | 190 | goto no_kprobe; |
184 | } | 191 | } |
185 | 192 | ||
186 | lock_kprobes(); | ||
187 | p = get_kprobe(addr); | 193 | p = get_kprobe(addr); |
188 | if (!p) { | 194 | if (!p) { |
189 | unlock_kprobes(); | ||
190 | if (*addr != BREAKPOINT_INSTRUCTION) { | 195 | if (*addr != BREAKPOINT_INSTRUCTION) { |
191 | /* | 196 | /* |
192 | * PowerPC has multiple variants of the "trap" | 197 | * PowerPC has multiple variants of the "trap" |
@@ -209,24 +214,19 @@ static inline int kprobe_handler(struct pt_regs *regs) | |||
209 | goto no_kprobe; | 214 | goto no_kprobe; |
210 | } | 215 | } |
211 | 216 | ||
212 | kprobe_status = KPROBE_HIT_ACTIVE; | 217 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
213 | current_kprobe = p; | 218 | set_current_kprobe(p, regs, kcb); |
214 | kprobe_saved_msr = regs->msr; | ||
215 | if (p->pre_handler && p->pre_handler(p, regs)) | 219 | if (p->pre_handler && p->pre_handler(p, regs)) |
216 | /* handler has already set things up, so skip ss setup */ | 220 | /* handler has already set things up, so skip ss setup */ |
217 | return 1; | 221 | return 1; |
218 | 222 | ||
219 | ss_probe: | 223 | ss_probe: |
220 | prepare_singlestep(p, regs); | 224 | prepare_singlestep(p, regs); |
221 | kprobe_status = KPROBE_HIT_SS; | 225 | kcb->kprobe_status = KPROBE_HIT_SS; |
222 | /* | ||
223 | * This preempt_disable() matches the preempt_enable_no_resched() | ||
224 | * in post_kprobe_handler(). | ||
225 | */ | ||
226 | preempt_disable(); | ||
227 | return 1; | 226 | return 1; |
228 | 227 | ||
229 | no_kprobe: | 228 | no_kprobe: |
229 | preempt_enable_no_resched(); | ||
230 | return ret; | 230 | return ret; |
231 | } | 231 | } |
232 | 232 | ||
@@ -251,9 +251,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
251 | struct kretprobe_instance *ri = NULL; | 251 | struct kretprobe_instance *ri = NULL; |
252 | struct hlist_head *head; | 252 | struct hlist_head *head; |
253 | struct hlist_node *node, *tmp; | 253 | struct hlist_node *node, *tmp; |
254 | unsigned long orig_ret_address = 0; | 254 | unsigned long flags, orig_ret_address = 0; |
255 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; | 255 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; |
256 | 256 | ||
257 | spin_lock_irqsave(&kretprobe_lock, flags); | ||
257 | head = kretprobe_inst_table_head(current); | 258 | head = kretprobe_inst_table_head(current); |
258 | 259 | ||
259 | /* | 260 | /* |
@@ -292,12 +293,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
292 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); | 293 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); |
293 | regs->nip = orig_ret_address; | 294 | regs->nip = orig_ret_address; |
294 | 295 | ||
295 | unlock_kprobes(); | 296 | reset_current_kprobe(); |
297 | spin_unlock_irqrestore(&kretprobe_lock, flags); | ||
298 | preempt_enable_no_resched(); | ||
296 | 299 | ||
297 | /* | 300 | /* |
298 | * By returning a non-zero value, we are telling | 301 | * By returning a non-zero value, we are telling |
299 | * kprobe_handler() that we have handled unlocking | 302 | * kprobe_handler() that we don't want the post_handler |
300 | * and re-enabling preemption. | 303 | * to run (and have re-enabled preemption) |
301 | */ | 304 | */ |
302 | return 1; | 305 | return 1; |
303 | } | 306 | } |
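The return-probe trampoline now serializes its walk of the per-task instance list with kretprobe_lock taken with interrupts disabled (matching the "Called with kretprobe_lock held" contract noted on arch_prepare_kretprobe above) instead of the old global kprobe lock, and it is also the exit that drops the preempt_disable() taken in kprobe_handler(). Condensed from the hunk, the ordering is:

	unsigned long flags;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	/* ... consume the matching kretprobe_instance entries ... */
	regs->nip = orig_ret_address;
	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();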
@@ -323,23 +326,26 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | |||
323 | 326 | ||
324 | static inline int post_kprobe_handler(struct pt_regs *regs) | 327 | static inline int post_kprobe_handler(struct pt_regs *regs) |
325 | { | 328 | { |
326 | if (!kprobe_running()) | 329 | struct kprobe *cur = kprobe_running(); |
330 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
331 | |||
332 | if (!cur) | ||
327 | return 0; | 333 | return 0; |
328 | 334 | ||
329 | if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) { | 335 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { |
330 | kprobe_status = KPROBE_HIT_SSDONE; | 336 | kcb->kprobe_status = KPROBE_HIT_SSDONE; |
331 | current_kprobe->post_handler(current_kprobe, regs, 0); | 337 | cur->post_handler(cur, regs, 0); |
332 | } | 338 | } |
333 | 339 | ||
334 | resume_execution(current_kprobe, regs); | 340 | resume_execution(cur, regs); |
335 | regs->msr |= kprobe_saved_msr; | 341 | regs->msr |= kcb->kprobe_saved_msr; |
336 | 342 | ||
337 | /*Restore back the original saved kprobes variables and continue. */ | 343 | /*Restore back the original saved kprobes variables and continue. */ |
338 | if (kprobe_status == KPROBE_REENTER) { | 344 | if (kcb->kprobe_status == KPROBE_REENTER) { |
339 | restore_previous_kprobe(); | 345 | restore_previous_kprobe(kcb); |
340 | goto out; | 346 | goto out; |
341 | } | 347 | } |
342 | unlock_kprobes(); | 348 | reset_current_kprobe(); |
343 | out: | 349 | out: |
344 | preempt_enable_no_resched(); | 350 | preempt_enable_no_resched(); |
345 | 351 | ||
@@ -354,19 +360,20 @@ out: | |||
354 | return 1; | 360 | return 1; |
355 | } | 361 | } |
356 | 362 | ||
357 | /* Interrupts disabled, kprobe_lock held. */ | ||
358 | static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | 363 | static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
359 | { | 364 | { |
360 | if (current_kprobe->fault_handler | 365 | struct kprobe *cur = kprobe_running(); |
361 | && current_kprobe->fault_handler(current_kprobe, regs, trapnr)) | 366 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
367 | |||
368 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
362 | return 1; | 369 | return 1; |
363 | 370 | ||
364 | if (kprobe_status & KPROBE_HIT_SS) { | 371 | if (kcb->kprobe_status & KPROBE_HIT_SS) { |
365 | resume_execution(current_kprobe, regs); | 372 | resume_execution(cur, regs); |
366 | regs->msr &= ~MSR_SE; | 373 | regs->msr &= ~MSR_SE; |
367 | regs->msr |= kprobe_saved_msr; | 374 | regs->msr |= kcb->kprobe_saved_msr; |
368 | 375 | ||
369 | unlock_kprobes(); | 376 | reset_current_kprobe(); |
370 | preempt_enable_no_resched(); | 377 | preempt_enable_no_resched(); |
371 | } | 378 | } |
372 | return 0; | 379 | return 0; |
@@ -381,11 +388,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
381 | struct die_args *args = (struct die_args *)data; | 388 | struct die_args *args = (struct die_args *)data; |
382 | int ret = NOTIFY_DONE; | 389 | int ret = NOTIFY_DONE; |
383 | 390 | ||
384 | /* | ||
385 | * Interrupts are not disabled here. We need to disable | ||
386 | * preemption, because kprobe_running() uses smp_processor_id(). | ||
387 | */ | ||
388 | preempt_disable(); | ||
389 | switch (val) { | 391 | switch (val) { |
390 | case DIE_BPT: | 392 | case DIE_BPT: |
391 | if (kprobe_handler(args->regs)) | 393 | if (kprobe_handler(args->regs)) |
@@ -396,22 +398,25 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
396 | ret = NOTIFY_STOP; | 398 | ret = NOTIFY_STOP; |
397 | break; | 399 | break; |
398 | case DIE_PAGE_FAULT: | 400 | case DIE_PAGE_FAULT: |
401 | /* kprobe_running() needs smp_processor_id() */ | ||
402 | preempt_disable(); | ||
399 | if (kprobe_running() && | 403 | if (kprobe_running() && |
400 | kprobe_fault_handler(args->regs, args->trapnr)) | 404 | kprobe_fault_handler(args->regs, args->trapnr)) |
401 | ret = NOTIFY_STOP; | 405 | ret = NOTIFY_STOP; |
406 | preempt_enable(); | ||
402 | break; | 407 | break; |
403 | default: | 408 | default: |
404 | break; | 409 | break; |
405 | } | 410 | } |
406 | preempt_enable_no_resched(); | ||
407 | return ret; | 411 | return ret; |
408 | } | 412 | } |
409 | 413 | ||
410 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | 414 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
411 | { | 415 | { |
412 | struct jprobe *jp = container_of(p, struct jprobe, kp); | 416 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
417 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
413 | 418 | ||
414 | memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs)); | 419 | memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); |
415 | 420 | ||
416 | /* setup return addr to the jprobe handler routine */ | 421 | /* setup return addr to the jprobe handler routine */ |
417 | regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry); | 422 | regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry); |
@@ -431,12 +436,15 @@ void __kprobes jprobe_return_end(void) | |||
431 | 436 | ||
432 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | 437 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
433 | { | 438 | { |
439 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
440 | |||
434 | /* | 441 | /* |
435 | * FIXME - we should ideally be validating that we got here 'cos | 442 | * FIXME - we should ideally be validating that we got here 'cos |
436 | * of the "trap" in jprobe_return() above, before restoring the | 443 | * of the "trap" in jprobe_return() above, before restoring the |
437 | * saved regs... | 444 | * saved regs... |
438 | */ | 445 | */ |
439 | memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs)); | 446 | memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); |
447 | preempt_enable_no_resched(); | ||
440 | return 1; | 448 | return 1; |
441 | } | 449 | } |
442 | 450 | ||
diff --git a/arch/ppc64/kernel/lparcfg.c b/arch/ppc64/kernel/lparcfg.c index cae19bbd5acd..3e7b2f28ec83 100644 --- a/arch/ppc64/kernel/lparcfg.c +++ b/arch/ppc64/kernel/lparcfg.c | |||
@@ -26,14 +26,14 @@ | |||
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/seq_file.h> | 27 | #include <linux/seq_file.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/iSeries/HvLpConfig.h> | 29 | #include <asm/iseries/hv_lp_config.h> |
30 | #include <asm/lppaca.h> | 30 | #include <asm/lppaca.h> |
31 | #include <asm/hvcall.h> | 31 | #include <asm/hvcall.h> |
32 | #include <asm/firmware.h> | 32 | #include <asm/firmware.h> |
33 | #include <asm/rtas.h> | 33 | #include <asm/rtas.h> |
34 | #include <asm/system.h> | 34 | #include <asm/system.h> |
35 | #include <asm/time.h> | 35 | #include <asm/time.h> |
36 | #include <asm/iSeries/ItExtVpdPanel.h> | 36 | #include <asm/iseries/it_exp_vpd_panel.h> |
37 | #include <asm/prom.h> | 37 | #include <asm/prom.h> |
38 | 38 | ||
39 | #define MODULE_VERS "1.6" | 39 | #define MODULE_VERS "1.6" |
@@ -599,9 +599,7 @@ int __init lparcfg_init(void) | |||
599 | void __exit lparcfg_cleanup(void) | 599 | void __exit lparcfg_cleanup(void) |
600 | { | 600 | { |
601 | if (proc_ppc64_lparcfg) { | 601 | if (proc_ppc64_lparcfg) { |
602 | if (proc_ppc64_lparcfg->data) { | 602 | kfree(proc_ppc64_lparcfg->data); |
603 | kfree(proc_ppc64_lparcfg->data); | ||
604 | } | ||
605 | remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent); | 603 | remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent); |
606 | } | 604 | } |
607 | } | 605 | } |
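The lparcfg_cleanup() simplification above leans on the fact that kfree(NULL) is defined to be a no-op, so guarding the call with "if (proc_ppc64_lparcfg->data)" added nothing. For example:

	char *buf = NULL;

	kfree(buf);	/* perfectly legal: kfree() just returns on a NULL pointer */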
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c index bf7cc4f8210f..ff8679f260f3 100644 --- a/arch/ppc64/kernel/machine_kexec.c +++ b/arch/ppc64/kernel/machine_kexec.c | |||
@@ -244,7 +244,6 @@ static void kexec_prepare_cpus(void) | |||
244 | 244 | ||
245 | static void kexec_prepare_cpus(void) | 245 | static void kexec_prepare_cpus(void) |
246 | { | 246 | { |
247 | extern void smp_release_cpus(void); | ||
248 | /* | 247 | /* |
249 | * move the secondarys to us so that we can copy | 248 | * move the secondarys to us so that we can copy |
250 | * the new kernel 0-0x100 safely | 249 | * the new kernel 0-0x100 safely |
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c index 33a2d8db3f21..3133c72b28ec 100644 --- a/arch/ppc64/kernel/pacaData.c +++ b/arch/ppc64/kernel/pacaData.c | |||
@@ -17,13 +17,13 @@ | |||
17 | #include <asm/page.h> | 17 | #include <asm/page.h> |
18 | 18 | ||
19 | #include <asm/lppaca.h> | 19 | #include <asm/lppaca.h> |
20 | #include <asm/iSeries/ItLpQueue.h> | 20 | #include <asm/iseries/it_lp_queue.h> |
21 | #include <asm/paca.h> | 21 | #include <asm/paca.h> |
22 | 22 | ||
23 | static union { | 23 | static union { |
24 | struct systemcfg data; | 24 | struct systemcfg data; |
25 | u8 page[PAGE_SIZE]; | 25 | u8 page[PAGE_SIZE]; |
26 | } systemcfg_store __page_aligned; | 26 | } systemcfg_store __attribute__((__section__(".data.page.aligned"))); |
27 | struct systemcfg *systemcfg = &systemcfg_store.data; | 27 | struct systemcfg *systemcfg = &systemcfg_store.data; |
28 | EXPORT_SYMBOL(systemcfg); | 28 | EXPORT_SYMBOL(systemcfg); |
29 | 29 | ||
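pacaData.c now spells the placement out with a plain section attribute instead of the old __page_aligned helper; the union is still padded to PAGE_SIZE so the object fills a whole page in the named section. The same pattern in isolation, as a sketch (the variable name and stand-in payload type are made up; the section name mirrors the one used above):

	static union {
		long payload;				/* stand-in for struct systemcfg */
		unsigned char page[PAGE_SIZE];
	} example_store __attribute__((__section__(".data.page.aligned")));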
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c index 493bbe43f5b4..1a443a7ada4c 100644 --- a/arch/ppc64/kernel/pci_dn.c +++ b/arch/ppc64/kernel/pci_dn.c | |||
@@ -181,13 +181,14 @@ EXPORT_SYMBOL(fetch_dev_dn); | |||
181 | static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) | 181 | static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) |
182 | { | 182 | { |
183 | struct device_node *np = node; | 183 | struct device_node *np = node; |
184 | struct pci_dn *pci; | 184 | struct pci_dn *pci = NULL; |
185 | int err = NOTIFY_OK; | 185 | int err = NOTIFY_OK; |
186 | 186 | ||
187 | switch (action) { | 187 | switch (action) { |
188 | case PSERIES_RECONFIG_ADD: | 188 | case PSERIES_RECONFIG_ADD: |
189 | pci = np->parent->data; | 189 | pci = np->parent->data; |
190 | update_dn_pci_info(np, pci->phb); | 190 | if (pci) |
191 | update_dn_pci_info(np, pci->phb); | ||
191 | break; | 192 | break; |
192 | default: | 193 | default: |
193 | err = NOTIFY_DONE; | 194 | err = NOTIFY_DONE; |
diff --git a/arch/ppc64/kernel/proc_ppc64.c b/arch/ppc64/kernel/proc_ppc64.c index a87c66a9652a..24e955ee9487 100644 --- a/arch/ppc64/kernel/proc_ppc64.c +++ b/arch/ppc64/kernel/proc_ppc64.c | |||
@@ -53,7 +53,7 @@ static int __init proc_ppc64_create(void) | |||
53 | if (!root) | 53 | if (!root) |
54 | return 1; | 54 | return 1; |
55 | 55 | ||
56 | if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_BPA))) | 56 | if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_CELL))) |
57 | return 0; | 57 | return 0; |
58 | 58 | ||
59 | if (!proc_mkdir("rtas", root)) | 59 | if (!proc_mkdir("rtas", root)) |
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c index 97bfceb5353b..dece31e58bc4 100644 --- a/arch/ppc64/kernel/prom.c +++ b/arch/ppc64/kernel/prom.c | |||
@@ -635,10 +635,10 @@ static inline char *find_flat_dt_string(u32 offset) | |||
635 | * used to extract the memory informations at boot before we can | 635 | * used to extract the memory informations at boot before we can |
636 | * unflatten the tree | 636 | * unflatten the tree |
637 | */ | 637 | */ |
638 | static int __init scan_flat_dt(int (*it)(unsigned long node, | 638 | int __init of_scan_flat_dt(int (*it)(unsigned long node, |
639 | const char *uname, int depth, | 639 | const char *uname, int depth, |
640 | void *data), | 640 | void *data), |
641 | void *data) | 641 | void *data) |
642 | { | 642 | { |
643 | unsigned long p = ((unsigned long)initial_boot_params) + | 643 | unsigned long p = ((unsigned long)initial_boot_params) + |
644 | initial_boot_params->off_dt_struct; | 644 | initial_boot_params->off_dt_struct; |
@@ -695,8 +695,8 @@ static int __init scan_flat_dt(int (*it)(unsigned long node, | |||
695 | * This function can be used within scan_flattened_dt callback to get | 695 | * This function can be used within scan_flattened_dt callback to get |
696 | * access to properties | 696 | * access to properties |
697 | */ | 697 | */ |
698 | static void* __init get_flat_dt_prop(unsigned long node, const char *name, | 698 | void* __init of_get_flat_dt_prop(unsigned long node, const char *name, |
699 | unsigned long *size) | 699 | unsigned long *size) |
700 | { | 700 | { |
701 | unsigned long p = node; | 701 | unsigned long p = node; |
702 | 702 | ||
@@ -996,7 +996,7 @@ void __init unflatten_device_tree(void) | |||
996 | static int __init early_init_dt_scan_cpus(unsigned long node, | 996 | static int __init early_init_dt_scan_cpus(unsigned long node, |
997 | const char *uname, int depth, void *data) | 997 | const char *uname, int depth, void *data) |
998 | { | 998 | { |
999 | char *type = get_flat_dt_prop(node, "device_type", NULL); | 999 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); |
1000 | u32 *prop; | 1000 | u32 *prop; |
1001 | unsigned long size; | 1001 | unsigned long size; |
1002 | 1002 | ||
@@ -1004,17 +1004,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
1004 | if (type == NULL || strcmp(type, "cpu") != 0) | 1004 | if (type == NULL || strcmp(type, "cpu") != 0) |
1005 | return 0; | 1005 | return 0; |
1006 | 1006 | ||
1007 | /* On LPAR, look for the first ibm,pft-size property for the hash table size | ||
1008 | */ | ||
1009 | if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) { | ||
1010 | u32 *pft_size; | ||
1011 | pft_size = (u32 *)get_flat_dt_prop(node, "ibm,pft-size", NULL); | ||
1012 | if (pft_size != NULL) { | ||
1013 | /* pft_size[0] is the NUMA CEC cookie */ | ||
1014 | ppc64_pft_size = pft_size[1]; | ||
1015 | } | ||
1016 | } | ||
1017 | |||
1018 | if (initial_boot_params && initial_boot_params->version >= 2) { | 1007 | if (initial_boot_params && initial_boot_params->version >= 2) { |
1019 | /* version 2 of the kexec param format adds the phys cpuid | 1008 | /* version 2 of the kexec param format adds the phys cpuid |
1020 | * of booted proc. | 1009 | * of booted proc. |
@@ -1023,8 +1012,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
1023 | boot_cpuid = 0; | 1012 | boot_cpuid = 0; |
1024 | } else { | 1013 | } else { |
1025 | /* Check if it's the boot-cpu, set its hw index in paca now */ | 1014 | /* Check if it's the boot-cpu, set its hw index in paca now */ |
1026 | if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) { | 1015 | if (of_get_flat_dt_prop(node, "linux,boot-cpu", NULL) |
1027 | u32 *prop = get_flat_dt_prop(node, "reg", NULL); | 1016 | != NULL) { |
1017 | u32 *prop = of_get_flat_dt_prop(node, "reg", NULL); | ||
1028 | set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop); | 1018 | set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop); |
1029 | boot_cpuid_phys = get_hard_smp_processor_id(0); | 1019 | boot_cpuid_phys = get_hard_smp_processor_id(0); |
1030 | } | 1020 | } |
@@ -1032,14 +1022,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
1032 | 1022 | ||
1033 | #ifdef CONFIG_ALTIVEC | 1023 | #ifdef CONFIG_ALTIVEC |
1034 | /* Check if we have a VMX and eventually update CPU features */ | 1024 | /* Check if we have a VMX and eventually update CPU features */ |
1035 | prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", NULL); | 1025 | prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL); |
1036 | if (prop && (*prop) > 0) { | 1026 | if (prop && (*prop) > 0) { |
1037 | cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; | 1027 | cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; |
1038 | cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; | 1028 | cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; |
1039 | } | 1029 | } |
1040 | 1030 | ||
1041 | /* Same goes for Apple's "altivec" property */ | 1031 | /* Same goes for Apple's "altivec" property */ |
1042 | prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL); | 1032 | prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL); |
1043 | if (prop) { | 1033 | if (prop) { |
1044 | cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; | 1034 | cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; |
1045 | cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; | 1035 | cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; |
@@ -1051,7 +1041,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
1051 | * this by looking at the size of the ibm,ppc-interrupt-server#s | 1041 | * this by looking at the size of the ibm,ppc-interrupt-server#s |
1052 | * property | 1042 | * property |
1053 | */ | 1043 | */ |
1054 | prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", | 1044 | prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", |
1055 | &size); | 1045 | &size); |
1056 | cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; | 1046 | cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; |
1057 | if (prop && ((size / sizeof(u32)) > 1)) | 1047 | if (prop && ((size / sizeof(u32)) > 1)) |
@@ -1072,26 +1062,26 @@ static int __init early_init_dt_scan_chosen(unsigned long node, | |||
1072 | return 0; | 1062 | return 0; |
1073 | 1063 | ||
1074 | /* get platform type */ | 1064 | /* get platform type */ |
1075 | prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL); | 1065 | prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL); |
1076 | if (prop == NULL) | 1066 | if (prop == NULL) |
1077 | return 0; | 1067 | return 0; |
1078 | systemcfg->platform = *prop; | 1068 | systemcfg->platform = *prop; |
1079 | 1069 | ||
1080 | /* check if iommu is forced on or off */ | 1070 | /* check if iommu is forced on or off */ |
1081 | if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) | 1071 | if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) |
1082 | iommu_is_off = 1; | 1072 | iommu_is_off = 1; |
1083 | if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) | 1073 | if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) |
1084 | iommu_force_on = 1; | 1074 | iommu_force_on = 1; |
1085 | 1075 | ||
1086 | prop64 = (u64*)get_flat_dt_prop(node, "linux,memory-limit", NULL); | 1076 | prop64 = (u64*)of_get_flat_dt_prop(node, "linux,memory-limit", NULL); |
1087 | if (prop64) | 1077 | if (prop64) |
1088 | memory_limit = *prop64; | 1078 | memory_limit = *prop64; |
1089 | 1079 | ||
1090 | prop64 = (u64*)get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); | 1080 | prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-start",NULL); |
1091 | if (prop64) | 1081 | if (prop64) |
1092 | tce_alloc_start = *prop64; | 1082 | tce_alloc_start = *prop64; |
1093 | 1083 | ||
1094 | prop64 = (u64*)get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); | 1084 | prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); |
1095 | if (prop64) | 1085 | if (prop64) |
1096 | tce_alloc_end = *prop64; | 1086 | tce_alloc_end = *prop64; |
1097 | 1087 | ||
@@ -1102,9 +1092,12 @@ static int __init early_init_dt_scan_chosen(unsigned long node, | |||
1102 | { | 1092 | { |
1103 | u64 *basep, *entryp; | 1093 | u64 *basep, *entryp; |
1104 | 1094 | ||
1105 | basep = (u64*)get_flat_dt_prop(node, "linux,rtas-base", NULL); | 1095 | basep = (u64*)of_get_flat_dt_prop(node, |
1106 | entryp = (u64*)get_flat_dt_prop(node, "linux,rtas-entry", NULL); | 1096 | "linux,rtas-base", NULL); |
1107 | prop = (u32*)get_flat_dt_prop(node, "linux,rtas-size", NULL); | 1097 | entryp = (u64*)of_get_flat_dt_prop(node, |
1098 | "linux,rtas-entry", NULL); | ||
1099 | prop = (u32*)of_get_flat_dt_prop(node, | ||
1100 | "linux,rtas-size", NULL); | ||
1108 | if (basep && entryp && prop) { | 1101 | if (basep && entryp && prop) { |
1109 | rtas.base = *basep; | 1102 | rtas.base = *basep; |
1110 | rtas.entry = *entryp; | 1103 | rtas.entry = *entryp; |
@@ -1125,11 +1118,11 @@ static int __init early_init_dt_scan_root(unsigned long node, | |||
1125 | if (depth != 0) | 1118 | if (depth != 0) |
1126 | return 0; | 1119 | return 0; |
1127 | 1120 | ||
1128 | prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL); | 1121 | prop = (u32 *)of_get_flat_dt_prop(node, "#size-cells", NULL); |
1129 | dt_root_size_cells = (prop == NULL) ? 1 : *prop; | 1122 | dt_root_size_cells = (prop == NULL) ? 1 : *prop; |
1130 | DBG("dt_root_size_cells = %x\n", dt_root_size_cells); | 1123 | DBG("dt_root_size_cells = %x\n", dt_root_size_cells); |
1131 | 1124 | ||
1132 | prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL); | 1125 | prop = (u32 *)of_get_flat_dt_prop(node, "#address-cells", NULL); |
1133 | dt_root_addr_cells = (prop == NULL) ? 2 : *prop; | 1126 | dt_root_addr_cells = (prop == NULL) ? 2 : *prop; |
1134 | DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); | 1127 | DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); |
1135 | 1128 | ||
@@ -1161,7 +1154,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp) | |||
1161 | static int __init early_init_dt_scan_memory(unsigned long node, | 1154 | static int __init early_init_dt_scan_memory(unsigned long node, |
1162 | const char *uname, int depth, void *data) | 1155 | const char *uname, int depth, void *data) |
1163 | { | 1156 | { |
1164 | char *type = get_flat_dt_prop(node, "device_type", NULL); | 1157 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); |
1165 | cell_t *reg, *endp; | 1158 | cell_t *reg, *endp; |
1166 | unsigned long l; | 1159 | unsigned long l; |
1167 | 1160 | ||
@@ -1169,7 +1162,7 @@ static int __init early_init_dt_scan_memory(unsigned long node, | |||
1169 | if (type == NULL || strcmp(type, "memory") != 0) | 1162 | if (type == NULL || strcmp(type, "memory") != 0) |
1170 | return 0; | 1163 | return 0; |
1171 | 1164 | ||
1172 | reg = (cell_t *)get_flat_dt_prop(node, "reg", &l); | 1165 | reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); |
1173 | if (reg == NULL) | 1166 | if (reg == NULL) |
1174 | return 0; | 1167 | return 0; |
1175 | 1168 | ||
@@ -1225,19 +1218,16 @@ void __init early_init_devtree(void *params) | |||
1225 | /* Setup flat device-tree pointer */ | 1218 | /* Setup flat device-tree pointer */ |
1226 | initial_boot_params = params; | 1219 | initial_boot_params = params; |
1227 | 1220 | ||
1228 | /* By default, hash size is not set */ | ||
1229 | ppc64_pft_size = 0; | ||
1230 | |||
1231 | /* Retrieve various information from the /chosen node of the | 1221 | /* Retrieve various information from the /chosen node of the |
1232 | * device-tree, including the platform type, initrd location and | 1222 | * device-tree, including the platform type, initrd location and |
1233 | * size, TCE reserve, and more ... | 1223 | * size, TCE reserve, and more ... |
1234 | */ | 1224 | */ |
1235 | scan_flat_dt(early_init_dt_scan_chosen, NULL); | 1225 | of_scan_flat_dt(early_init_dt_scan_chosen, NULL); |
1236 | 1226 | ||
1237 | /* Scan memory nodes and rebuild LMBs */ | 1227 | /* Scan memory nodes and rebuild LMBs */ |
1238 | lmb_init(); | 1228 | lmb_init(); |
1239 | scan_flat_dt(early_init_dt_scan_root, NULL); | 1229 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
1240 | scan_flat_dt(early_init_dt_scan_memory, NULL); | 1230 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); |
1241 | lmb_enforce_memory_limit(memory_limit); | 1231 | lmb_enforce_memory_limit(memory_limit); |
1242 | lmb_analyze(); | 1232 | lmb_analyze(); |
1243 | systemcfg->physicalMemorySize = lmb_phys_mem_size(); | 1233 | systemcfg->physicalMemorySize = lmb_phys_mem_size(); |
@@ -1253,26 +1243,8 @@ void __init early_init_devtree(void *params) | |||
1253 | /* Retrieve hash table size from flattened tree plus other | 1243 | /* Retrieve hash table size from flattened tree plus other |
1254 | * CPU related information (altivec support, boot CPU ID, ...) | 1244 | * CPU related information (altivec support, boot CPU ID, ...) |
1255 | */ | 1245 | */ |
1256 | scan_flat_dt(early_init_dt_scan_cpus, NULL); | 1246 | of_scan_flat_dt(early_init_dt_scan_cpus, NULL); |
1257 | |||
1258 | /* If hash size wasn't obtained above, we calculate it now based on | ||
1259 | * the total RAM size | ||
1260 | */ | ||
1261 | if (ppc64_pft_size == 0) { | ||
1262 | unsigned long rnd_mem_size, pteg_count; | ||
1263 | |||
1264 | /* round mem_size up to next power of 2 */ | ||
1265 | rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize); | ||
1266 | if (rnd_mem_size < systemcfg->physicalMemorySize) | ||
1267 | rnd_mem_size <<= 1; | ||
1268 | |||
1269 | /* # pages / 2 */ | ||
1270 | pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11); | ||
1271 | |||
1272 | ppc64_pft_size = __ilog2(pteg_count << 7); | ||
1273 | } | ||
1274 | 1247 | ||
1275 | DBG("Hash pftSize: %x\n", (int)ppc64_pft_size); | ||
1276 | DBG(" <- early_init_devtree()\n"); | 1248 | DBG(" <- early_init_devtree()\n"); |
1277 | } | 1249 | } |
1278 | 1250 | ||
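prom.c's flattened-device-tree walkers are no longer static: scan_flat_dt()/get_flat_dt_prop() become of_scan_flat_dt()/of_get_flat_dt_prop(), so other early-boot code can reuse them. A hedged sketch of a caller, built only from the signatures visible in this diff (the callback name and what it looks for are invented for illustration; it returns 0 to keep walking, as the early_init_dt_scan_* callbacks here do):

	static int __init my_early_scan(unsigned long node, const char *uname,
					int depth, void *data)
	{
		unsigned long len;
		char *type = of_get_flat_dt_prop(node, "device_type", &len);

		if (type == NULL || strcmp(type, "memory") != 0)
			return 0;		/* not interesting, keep walking */

		/* a real caller would record the node's "reg" property here */
		return 0;
	}

	/* early in boot, once initial_boot_params points at the blob: */
	of_scan_flat_dt(my_early_scan, NULL);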
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c index 69924ba4d7d9..a4bbca6dbb8b 100644 --- a/arch/ppc64/kernel/prom_init.c +++ b/arch/ppc64/kernel/prom_init.c | |||
@@ -1939,9 +1939,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long | |||
1939 | prom_send_capabilities(); | 1939 | prom_send_capabilities(); |
1940 | 1940 | ||
1941 | /* | 1941 | /* |
1942 | * On pSeries and BPA, copy the CPU hold code | 1942 | * On pSeries and Cell, copy the CPU hold code |
1943 | */ | 1943 | */ |
1944 | if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_BPA)) | 1944 | if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_CELL)) |
1945 | copy_and_flush(0, KERNELBASE - offset, 0x100, 0); | 1945 | copy_and_flush(0, KERNELBASE - offset, 0x100, 0); |
1946 | 1946 | ||
1947 | /* | 1947 | /* |
diff --git a/arch/ppc64/kernel/rtas-proc.c b/arch/ppc64/kernel/rtas-proc.c deleted file mode 100644 index 5bdd5b079d96..000000000000 --- a/arch/ppc64/kernel/rtas-proc.c +++ /dev/null | |||
@@ -1,808 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ppc64/kernel/rtas-proc.c | ||
3 | * Copyright (C) 2000 Tilmann Bitterberg | ||
4 | * (tilmann@bitterberg.de) | ||
5 | * | ||
6 | * RTAS (Runtime Abstraction Services) stuff | ||
7 | * Intention is to provide a clean user interface | ||
8 | * to use the RTAS. | ||
9 | * | ||
10 | * TODO: | ||
11 | * Split off a header file and maybe move it to a different | ||
12 | * location. Write Documentation on what the /proc/rtas/ entries | ||
13 | * actually do. | ||
14 | */ | ||
15 | |||
16 | #include <linux/errno.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/proc_fs.h> | ||
19 | #include <linux/stat.h> | ||
20 | #include <linux/ctype.h> | ||
21 | #include <linux/time.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/seq_file.h> | ||
25 | #include <linux/bitops.h> | ||
26 | #include <linux/rtc.h> | ||
27 | |||
28 | #include <asm/uaccess.h> | ||
29 | #include <asm/processor.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/prom.h> | ||
32 | #include <asm/rtas.h> | ||
33 | #include <asm/machdep.h> /* for ppc_md */ | ||
34 | #include <asm/time.h> | ||
35 | #include <asm/systemcfg.h> | ||
36 | |||
37 | /* Token for Sensors */ | ||
38 | #define KEY_SWITCH 0x0001 | ||
39 | #define ENCLOSURE_SWITCH 0x0002 | ||
40 | #define THERMAL_SENSOR 0x0003 | ||
41 | #define LID_STATUS 0x0004 | ||
42 | #define POWER_SOURCE 0x0005 | ||
43 | #define BATTERY_VOLTAGE 0x0006 | ||
44 | #define BATTERY_REMAINING 0x0007 | ||
45 | #define BATTERY_PERCENTAGE 0x0008 | ||
46 | #define EPOW_SENSOR 0x0009 | ||
47 | #define BATTERY_CYCLESTATE 0x000a | ||
48 | #define BATTERY_CHARGING 0x000b | ||
49 | |||
50 | /* IBM specific sensors */ | ||
51 | #define IBM_SURVEILLANCE 0x2328 /* 9000 */ | ||
52 | #define IBM_FANRPM 0x2329 /* 9001 */ | ||
53 | #define IBM_VOLTAGE 0x232a /* 9002 */ | ||
54 | #define IBM_DRCONNECTOR 0x232b /* 9003 */ | ||
55 | #define IBM_POWERSUPPLY 0x232c /* 9004 */ | ||
56 | |||
57 | /* Status return values */ | ||
58 | #define SENSOR_CRITICAL_HIGH 13 | ||
59 | #define SENSOR_WARNING_HIGH 12 | ||
60 | #define SENSOR_NORMAL 11 | ||
61 | #define SENSOR_WARNING_LOW 10 | ||
62 | #define SENSOR_CRITICAL_LOW 9 | ||
63 | #define SENSOR_SUCCESS 0 | ||
64 | #define SENSOR_HW_ERROR -1 | ||
65 | #define SENSOR_BUSY -2 | ||
66 | #define SENSOR_NOT_EXIST -3 | ||
67 | #define SENSOR_DR_ENTITY -9000 | ||
68 | |||
69 | /* Location Codes */ | ||
70 | #define LOC_SCSI_DEV_ADDR 'A' | ||
71 | #define LOC_SCSI_DEV_LOC 'B' | ||
72 | #define LOC_CPU 'C' | ||
73 | #define LOC_DISKETTE 'D' | ||
74 | #define LOC_ETHERNET 'E' | ||
75 | #define LOC_FAN 'F' | ||
76 | #define LOC_GRAPHICS 'G' | ||
77 | /* reserved / not used 'H' */ | ||
78 | #define LOC_IO_ADAPTER 'I' | ||
79 | /* reserved / not used 'J' */ | ||
80 | #define LOC_KEYBOARD 'K' | ||
81 | #define LOC_LCD 'L' | ||
82 | #define LOC_MEMORY 'M' | ||
83 | #define LOC_NV_MEMORY 'N' | ||
84 | #define LOC_MOUSE 'O' | ||
85 | #define LOC_PLANAR 'P' | ||
86 | #define LOC_OTHER_IO 'Q' | ||
87 | #define LOC_PARALLEL 'R' | ||
88 | #define LOC_SERIAL 'S' | ||
89 | #define LOC_DEAD_RING 'T' | ||
90 | #define LOC_RACKMOUNTED 'U' /* for _u_nit is rack mounted */ | ||
91 | #define LOC_VOLTAGE 'V' | ||
92 | #define LOC_SWITCH_ADAPTER 'W' | ||
93 | #define LOC_OTHER 'X' | ||
94 | #define LOC_FIRMWARE 'Y' | ||
95 | #define LOC_SCSI 'Z' | ||
96 | |||
97 | /* Tokens for indicators */ | ||
98 | #define TONE_FREQUENCY 0x0001 /* 0 - 1000 (HZ)*/ | ||
99 | #define TONE_VOLUME 0x0002 /* 0 - 100 (%) */ | ||
100 | #define SYSTEM_POWER_STATE 0x0003 | ||
101 | #define WARNING_LIGHT 0x0004 | ||
102 | #define DISK_ACTIVITY_LIGHT 0x0005 | ||
103 | #define HEX_DISPLAY_UNIT 0x0006 | ||
104 | #define BATTERY_WARNING_TIME 0x0007 | ||
105 | #define CONDITION_CYCLE_REQUEST 0x0008 | ||
106 | #define SURVEILLANCE_INDICATOR 0x2328 /* 9000 */ | ||
107 | #define DR_ACTION 0x2329 /* 9001 */ | ||
108 | #define DR_INDICATOR 0x232a /* 9002 */ | ||
109 | /* 9003 - 9004: Vendor specific */ | ||
110 | /* 9006 - 9999: Vendor specific */ | ||
111 | |||
112 | /* other */ | ||
113 | #define MAX_SENSORS 17 /* I only know of 17 sensors */ | ||
114 | #define MAX_LINELENGTH 256 | ||
115 | #define SENSOR_PREFIX "ibm,sensor-" | ||
116 | #define cel_to_fahr(x) ((x*9/5)+32) | ||
117 | |||
118 | |||
119 | /* Globals */ | ||
120 | static struct rtas_sensors sensors; | ||
121 | static struct device_node *rtas_node = NULL; | ||
122 | static unsigned long power_on_time = 0; /* Save the time the user set */ | ||
123 | static char progress_led[MAX_LINELENGTH]; | ||
124 | |||
125 | static unsigned long rtas_tone_frequency = 1000; | ||
126 | static unsigned long rtas_tone_volume = 0; | ||
127 | |||
128 | /* ****************STRUCTS******************************************* */ | ||
129 | struct individual_sensor { | ||
130 | unsigned int token; | ||
131 | unsigned int quant; | ||
132 | }; | ||
133 | |||
134 | struct rtas_sensors { | ||
135 | struct individual_sensor sensor[MAX_SENSORS]; | ||
136 | unsigned int quant; | ||
137 | }; | ||
138 | |||
139 | /* ****************************************************************** */ | ||
140 | /* Declarations */ | ||
141 | static int ppc_rtas_sensors_show(struct seq_file *m, void *v); | ||
142 | static int ppc_rtas_clock_show(struct seq_file *m, void *v); | ||
143 | static ssize_t ppc_rtas_clock_write(struct file *file, | ||
144 | const char __user *buf, size_t count, loff_t *ppos); | ||
145 | static int ppc_rtas_progress_show(struct seq_file *m, void *v); | ||
146 | static ssize_t ppc_rtas_progress_write(struct file *file, | ||
147 | const char __user *buf, size_t count, loff_t *ppos); | ||
148 | static int ppc_rtas_poweron_show(struct seq_file *m, void *v); | ||
149 | static ssize_t ppc_rtas_poweron_write(struct file *file, | ||
150 | const char __user *buf, size_t count, loff_t *ppos); | ||
151 | |||
152 | static ssize_t ppc_rtas_tone_freq_write(struct file *file, | ||
153 | const char __user *buf, size_t count, loff_t *ppos); | ||
154 | static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v); | ||
155 | static ssize_t ppc_rtas_tone_volume_write(struct file *file, | ||
156 | const char __user *buf, size_t count, loff_t *ppos); | ||
157 | static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v); | ||
158 | static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v); | ||
159 | |||
160 | static int sensors_open(struct inode *inode, struct file *file) | ||
161 | { | ||
162 | return single_open(file, ppc_rtas_sensors_show, NULL); | ||
163 | } | ||
164 | |||
165 | struct file_operations ppc_rtas_sensors_operations = { | ||
166 | .open = sensors_open, | ||
167 | .read = seq_read, | ||
168 | .llseek = seq_lseek, | ||
169 | .release = single_release, | ||
170 | }; | ||
171 | |||
172 | static int poweron_open(struct inode *inode, struct file *file) | ||
173 | { | ||
174 | return single_open(file, ppc_rtas_poweron_show, NULL); | ||
175 | } | ||
176 | |||
177 | struct file_operations ppc_rtas_poweron_operations = { | ||
178 | .open = poweron_open, | ||
179 | .read = seq_read, | ||
180 | .llseek = seq_lseek, | ||
181 | .write = ppc_rtas_poweron_write, | ||
182 | .release = single_release, | ||
183 | }; | ||
184 | |||
185 | static int progress_open(struct inode *inode, struct file *file) | ||
186 | { | ||
187 | return single_open(file, ppc_rtas_progress_show, NULL); | ||
188 | } | ||
189 | |||
190 | struct file_operations ppc_rtas_progress_operations = { | ||
191 | .open = progress_open, | ||
192 | .read = seq_read, | ||
193 | .llseek = seq_lseek, | ||
194 | .write = ppc_rtas_progress_write, | ||
195 | .release = single_release, | ||
196 | }; | ||
197 | |||
198 | static int clock_open(struct inode *inode, struct file *file) | ||
199 | { | ||
200 | return single_open(file, ppc_rtas_clock_show, NULL); | ||
201 | } | ||
202 | |||
203 | struct file_operations ppc_rtas_clock_operations = { | ||
204 | .open = clock_open, | ||
205 | .read = seq_read, | ||
206 | .llseek = seq_lseek, | ||
207 | .write = ppc_rtas_clock_write, | ||
208 | .release = single_release, | ||
209 | }; | ||
210 | |||
211 | static int tone_freq_open(struct inode *inode, struct file *file) | ||
212 | { | ||
213 | return single_open(file, ppc_rtas_tone_freq_show, NULL); | ||
214 | } | ||
215 | |||
216 | struct file_operations ppc_rtas_tone_freq_operations = { | ||
217 | .open = tone_freq_open, | ||
218 | .read = seq_read, | ||
219 | .llseek = seq_lseek, | ||
220 | .write = ppc_rtas_tone_freq_write, | ||
221 | .release = single_release, | ||
222 | }; | ||
223 | |||
224 | static int tone_volume_open(struct inode *inode, struct file *file) | ||
225 | { | ||
226 | return single_open(file, ppc_rtas_tone_volume_show, NULL); | ||
227 | } | ||
228 | |||
229 | struct file_operations ppc_rtas_tone_volume_operations = { | ||
230 | .open = tone_volume_open, | ||
231 | .read = seq_read, | ||
232 | .llseek = seq_lseek, | ||
233 | .write = ppc_rtas_tone_volume_write, | ||
234 | .release = single_release, | ||
235 | }; | ||
236 | |||
237 | static int rmo_buf_open(struct inode *inode, struct file *file) | ||
238 | { | ||
239 | return single_open(file, ppc_rtas_rmo_buf_show, NULL); | ||
240 | } | ||
241 | |||
242 | struct file_operations ppc_rtas_rmo_buf_ops = { | ||
243 | .open = rmo_buf_open, | ||
244 | .read = seq_read, | ||
245 | .llseek = seq_lseek, | ||
246 | .release = single_release, | ||
247 | }; | ||
248 | |||
249 | static int ppc_rtas_find_all_sensors(void); | ||
250 | static void ppc_rtas_process_sensor(struct seq_file *m, | ||
251 | struct individual_sensor *s, int state, int error, char *loc); | ||
252 | static char *ppc_rtas_process_error(int error); | ||
253 | static void get_location_code(struct seq_file *m, | ||
254 | struct individual_sensor *s, char *loc); | ||
255 | static void check_location_string(struct seq_file *m, char *c); | ||
256 | static void check_location(struct seq_file *m, char *c); | ||
257 | |||
258 | static int __init proc_rtas_init(void) | ||
259 | { | ||
260 | struct proc_dir_entry *entry; | ||
261 | |||
262 | if (!(systemcfg->platform & PLATFORM_PSERIES)) | ||
263 | return 1; | ||
264 | |||
265 | rtas_node = of_find_node_by_name(NULL, "rtas"); | ||
266 | if (rtas_node == NULL) | ||
267 | return 1; | ||
268 | |||
269 | entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL); | ||
270 | if (entry) | ||
271 | entry->proc_fops = &ppc_rtas_progress_operations; | ||
272 | |||
273 | entry = create_proc_entry("ppc64/rtas/clock", S_IRUGO|S_IWUSR, NULL); | ||
274 | if (entry) | ||
275 | entry->proc_fops = &ppc_rtas_clock_operations; | ||
276 | |||
277 | entry = create_proc_entry("ppc64/rtas/poweron", S_IWUSR|S_IRUGO, NULL); | ||
278 | if (entry) | ||
279 | entry->proc_fops = &ppc_rtas_poweron_operations; | ||
280 | |||
281 | entry = create_proc_entry("ppc64/rtas/sensors", S_IRUGO, NULL); | ||
282 | if (entry) | ||
283 | entry->proc_fops = &ppc_rtas_sensors_operations; | ||
284 | |||
285 | entry = create_proc_entry("ppc64/rtas/frequency", S_IWUSR|S_IRUGO, | ||
286 | NULL); | ||
287 | if (entry) | ||
288 | entry->proc_fops = &ppc_rtas_tone_freq_operations; | ||
289 | |||
290 | entry = create_proc_entry("ppc64/rtas/volume", S_IWUSR|S_IRUGO, NULL); | ||
291 | if (entry) | ||
292 | entry->proc_fops = &ppc_rtas_tone_volume_operations; | ||
293 | |||
294 | entry = create_proc_entry("ppc64/rtas/rmo_buffer", S_IRUSR, NULL); | ||
295 | if (entry) | ||
296 | entry->proc_fops = &ppc_rtas_rmo_buf_ops; | ||
297 | |||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | __initcall(proc_rtas_init); | ||
302 | |||
303 | static int parse_number(const char __user *p, size_t count, unsigned long *val) | ||
304 | { | ||
305 | char buf[40]; | ||
306 | char *end; | ||
307 | |||
308 | if (count > 39) | ||
309 | return -EINVAL; | ||
310 | |||
311 | if (copy_from_user(buf, p, count)) | ||
312 | return -EFAULT; | ||
313 | |||
314 | buf[count] = 0; | ||
315 | |||
316 | *val = simple_strtoul(buf, &end, 10); | ||
317 | if (*end && *end != '\n') | ||
318 | return -EINVAL; | ||
319 | |||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | /* ****************************************************************** */ | ||
324 | /* POWER-ON-TIME */ | ||
325 | /* ****************************************************************** */ | ||
326 | static ssize_t ppc_rtas_poweron_write(struct file *file, | ||
327 | const char __user *buf, size_t count, loff_t *ppos) | ||
328 | { | ||
329 | struct rtc_time tm; | ||
330 | unsigned long nowtime; | ||
331 | int error = parse_number(buf, count, &nowtime); | ||
332 | if (error) | ||
333 | return error; | ||
334 | |||
335 | power_on_time = nowtime; /* save the time */ | ||
336 | |||
337 | to_tm(nowtime, &tm); | ||
338 | |||
339 | error = rtas_call(rtas_token("set-time-for-power-on"), 7, 1, NULL, | ||
340 | tm.tm_year, tm.tm_mon, tm.tm_mday, | ||
341 | tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */); | ||
342 | if (error) | ||
343 | printk(KERN_WARNING "error: setting poweron time returned: %s\n", | ||
344 | ppc_rtas_process_error(error)); | ||
345 | return count; | ||
346 | } | ||
347 | /* ****************************************************************** */ | ||
348 | static int ppc_rtas_poweron_show(struct seq_file *m, void *v) | ||
349 | { | ||
350 | if (power_on_time == 0) | ||
351 | seq_printf(m, "Power on time not set\n"); | ||
352 | else | ||
353 | seq_printf(m, "%lu\n",power_on_time); | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | /* ****************************************************************** */ | ||
358 | /* PROGRESS */ | ||
359 | /* ****************************************************************** */ | ||
360 | static ssize_t ppc_rtas_progress_write(struct file *file, | ||
361 | const char __user *buf, size_t count, loff_t *ppos) | ||
362 | { | ||
363 | unsigned long hex; | ||
364 | |||
365 | if (count >= MAX_LINELENGTH) | ||
366 | count = MAX_LINELENGTH -1; | ||
367 | if (copy_from_user(progress_led, buf, count)) { /* save the string */ | ||
368 | return -EFAULT; | ||
369 | } | ||
370 | progress_led[count] = 0; | ||
371 | |||
372 | 	/* Let's see if the user passed a number (parsed as base 10) */ | ||
373 | hex = simple_strtoul(progress_led, NULL, 10); | ||
374 | |||
375 | rtas_progress ((char *)progress_led, hex); | ||
376 | return count; | ||
377 | |||
378 | /* clear the line */ | ||
379 | /* rtas_progress(" ", 0xffff);*/ | ||
380 | } | ||
381 | /* ****************************************************************** */ | ||
382 | static int ppc_rtas_progress_show(struct seq_file *m, void *v) | ||
383 | { | ||
384 | if (progress_led) | ||
385 | seq_printf(m, "%s\n", progress_led); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | /* ****************************************************************** */ | ||
390 | /* CLOCK */ | ||
391 | /* ****************************************************************** */ | ||
392 | static ssize_t ppc_rtas_clock_write(struct file *file, | ||
393 | const char __user *buf, size_t count, loff_t *ppos) | ||
394 | { | ||
395 | struct rtc_time tm; | ||
396 | unsigned long nowtime; | ||
397 | int error = parse_number(buf, count, &nowtime); | ||
398 | if (error) | ||
399 | return error; | ||
400 | |||
401 | to_tm(nowtime, &tm); | ||
402 | error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL, | ||
403 | tm.tm_year, tm.tm_mon, tm.tm_mday, | ||
404 | tm.tm_hour, tm.tm_min, tm.tm_sec, 0); | ||
405 | if (error) | ||
406 | printk(KERN_WARNING "error: setting the clock returned: %s\n", | ||
407 | ppc_rtas_process_error(error)); | ||
408 | return count; | ||
409 | } | ||
410 | /* ****************************************************************** */ | ||
411 | static int ppc_rtas_clock_show(struct seq_file *m, void *v) | ||
412 | { | ||
413 | int ret[8]; | ||
414 | int error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret); | ||
415 | |||
416 | if (error) { | ||
417 | printk(KERN_WARNING "error: reading the clock returned: %s\n", | ||
418 | ppc_rtas_process_error(error)); | ||
419 | seq_printf(m, "0"); | ||
420 | } else { | ||
421 | unsigned int year, mon, day, hour, min, sec; | ||
422 | year = ret[0]; mon = ret[1]; day = ret[2]; | ||
423 | hour = ret[3]; min = ret[4]; sec = ret[5]; | ||
424 | seq_printf(m, "%lu\n", | ||
425 | mktime(year, mon, day, hour, min, sec)); | ||
426 | } | ||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | /* ****************************************************************** */ | ||
431 | /* SENSOR STUFF */ | ||
432 | /* ****************************************************************** */ | ||
433 | static int ppc_rtas_sensors_show(struct seq_file *m, void *v) | ||
434 | { | ||
435 | int i,j; | ||
436 | int state, error; | ||
437 | int get_sensor_state = rtas_token("get-sensor-state"); | ||
438 | |||
439 | seq_printf(m, "RTAS (RunTime Abstraction Services) Sensor Information\n"); | ||
440 | seq_printf(m, "Sensor\t\tValue\t\tCondition\tLocation\n"); | ||
441 | seq_printf(m, "********************************************************\n"); | ||
442 | |||
443 | if (ppc_rtas_find_all_sensors() != 0) { | ||
444 | seq_printf(m, "\nNo sensors are available\n"); | ||
445 | return 0; | ||
446 | } | ||
447 | |||
448 | for (i=0; i<sensors.quant; i++) { | ||
449 | struct individual_sensor *p = &sensors.sensor[i]; | ||
450 | char rstr[64]; | ||
451 | char *loc; | ||
452 | int llen, offs; | ||
453 | |||
454 | sprintf (rstr, SENSOR_PREFIX"%04d", p->token); | ||
455 | loc = (char *) get_property(rtas_node, rstr, &llen); | ||
456 | |||
457 | /* A sensor may have multiple instances */ | ||
458 | for (j = 0, offs = 0; j <= p->quant; j++) { | ||
459 | error = rtas_call(get_sensor_state, 2, 2, &state, | ||
460 | p->token, j); | ||
461 | |||
462 | ppc_rtas_process_sensor(m, p, state, error, loc); | ||
463 | seq_putc(m, '\n'); | ||
464 | if (loc) { | ||
465 | offs += strlen(loc) + 1; | ||
466 | loc += strlen(loc) + 1; | ||
467 | if (offs >= llen) | ||
468 | loc = NULL; | ||
469 | } | ||
470 | } | ||
471 | } | ||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | /* ****************************************************************** */ | ||
476 | |||
477 | static int ppc_rtas_find_all_sensors(void) | ||
478 | { | ||
479 | unsigned int *utmp; | ||
480 | int len, i; | ||
481 | |||
482 | utmp = (unsigned int *) get_property(rtas_node, "rtas-sensors", &len); | ||
483 | if (utmp == NULL) { | ||
484 | printk (KERN_ERR "error: could not get rtas-sensors\n"); | ||
485 | return 1; | ||
486 | } | ||
487 | |||
488 | sensors.quant = len / 8; /* int + int */ | ||
489 | |||
490 | for (i=0; i<sensors.quant; i++) { | ||
491 | sensors.sensor[i].token = *utmp++; | ||
492 | sensors.sensor[i].quant = *utmp++; | ||
493 | } | ||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | /* ****************************************************************** */ | ||
498 | /* | ||
499 | * Builds a string of what rtas returned | ||
500 | */ | ||
501 | static char *ppc_rtas_process_error(int error) | ||
502 | { | ||
503 | switch (error) { | ||
504 | case SENSOR_CRITICAL_HIGH: | ||
505 | return "(critical high)"; | ||
506 | case SENSOR_WARNING_HIGH: | ||
507 | return "(warning high)"; | ||
508 | case SENSOR_NORMAL: | ||
509 | return "(normal)"; | ||
510 | case SENSOR_WARNING_LOW: | ||
511 | return "(warning low)"; | ||
512 | case SENSOR_CRITICAL_LOW: | ||
513 | return "(critical low)"; | ||
514 | case SENSOR_SUCCESS: | ||
515 | return "(read ok)"; | ||
516 | case SENSOR_HW_ERROR: | ||
517 | return "(hardware error)"; | ||
518 | case SENSOR_BUSY: | ||
519 | return "(busy)"; | ||
520 | case SENSOR_NOT_EXIST: | ||
521 | return "(non existent)"; | ||
522 | case SENSOR_DR_ENTITY: | ||
523 | return "(dr entity removed)"; | ||
524 | default: | ||
525 | return "(UNKNOWN)"; | ||
526 | } | ||
527 | } | ||
528 | |||
529 | /* ****************************************************************** */ | ||
530 | /* | ||
531 | * Builds a string out of what the sensor said | ||
532 | */ | ||
533 | |||
534 | static void ppc_rtas_process_sensor(struct seq_file *m, | ||
535 | struct individual_sensor *s, int state, int error, char *loc) | ||
536 | { | ||
537 | 	/* Defined return values */ | ||
538 | const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t", | ||
539 | "Maintenance" }; | ||
540 | const char * enclosure_switch[] = { "Closed", "Open" }; | ||
541 | const char * lid_status[] = { " ", "Open", "Closed" }; | ||
542 | const char * power_source[] = { "AC\t", "Battery", | ||
543 | "AC & Battery" }; | ||
544 | const char * battery_remaining[] = { "Very Low", "Low", "Mid", "High" }; | ||
545 | const char * epow_sensor[] = { | ||
546 | "EPOW Reset", "Cooling warning", "Power warning", | ||
547 | "System shutdown", "System halt", "EPOW main enclosure", | ||
548 | "EPOW power off" }; | ||
549 | const char * battery_cyclestate[] = { "None", "In progress", | ||
550 | "Requested" }; | ||
551 | 	const char * battery_charging[] = { "Charging", "Discharging", | ||
552 | "No current flow" }; | ||
553 | const char * ibm_drconnector[] = { "Empty", "Present", "Unusable", | ||
554 | "Exchange" }; | ||
555 | |||
556 | int have_strings = 0; | ||
557 | int num_states = 0; | ||
558 | int temperature = 0; | ||
559 | int unknown = 0; | ||
560 | |||
561 | /* What kind of sensor do we have here? */ | ||
562 | |||
563 | switch (s->token) { | ||
564 | case KEY_SWITCH: | ||
565 | seq_printf(m, "Key switch:\t"); | ||
566 | num_states = sizeof(key_switch) / sizeof(char *); | ||
567 | if (state < num_states) { | ||
568 | seq_printf(m, "%s\t", key_switch[state]); | ||
569 | have_strings = 1; | ||
570 | } | ||
571 | break; | ||
572 | case ENCLOSURE_SWITCH: | ||
573 | seq_printf(m, "Enclosure switch:\t"); | ||
574 | num_states = sizeof(enclosure_switch) / sizeof(char *); | ||
575 | if (state < num_states) { | ||
576 | seq_printf(m, "%s\t", | ||
577 | enclosure_switch[state]); | ||
578 | have_strings = 1; | ||
579 | } | ||
580 | break; | ||
581 | case THERMAL_SENSOR: | ||
582 | seq_printf(m, "Temp. (C/F):\t"); | ||
583 | temperature = 1; | ||
584 | break; | ||
585 | case LID_STATUS: | ||
586 | seq_printf(m, "Lid status:\t"); | ||
587 | num_states = sizeof(lid_status) / sizeof(char *); | ||
588 | if (state < num_states) { | ||
589 | seq_printf(m, "%s\t", lid_status[state]); | ||
590 | have_strings = 1; | ||
591 | } | ||
592 | break; | ||
593 | case POWER_SOURCE: | ||
594 | seq_printf(m, "Power source:\t"); | ||
595 | num_states = sizeof(power_source) / sizeof(char *); | ||
596 | if (state < num_states) { | ||
597 | seq_printf(m, "%s\t", | ||
598 | power_source[state]); | ||
599 | have_strings = 1; | ||
600 | } | ||
601 | break; | ||
602 | case BATTERY_VOLTAGE: | ||
603 | seq_printf(m, "Battery voltage:\t"); | ||
604 | break; | ||
605 | case BATTERY_REMAINING: | ||
606 | seq_printf(m, "Battery remaining:\t"); | ||
607 | num_states = sizeof(battery_remaining) / sizeof(char *); | ||
608 | if (state < num_states) | ||
609 | { | ||
610 | seq_printf(m, "%s\t", | ||
611 | battery_remaining[state]); | ||
612 | have_strings = 1; | ||
613 | } | ||
614 | break; | ||
615 | case BATTERY_PERCENTAGE: | ||
616 | seq_printf(m, "Battery percentage:\t"); | ||
617 | break; | ||
618 | case EPOW_SENSOR: | ||
619 | seq_printf(m, "EPOW Sensor:\t"); | ||
620 | num_states = sizeof(epow_sensor) / sizeof(char *); | ||
621 | if (state < num_states) { | ||
622 | seq_printf(m, "%s\t", epow_sensor[state]); | ||
623 | have_strings = 1; | ||
624 | } | ||
625 | break; | ||
626 | case BATTERY_CYCLESTATE: | ||
627 | seq_printf(m, "Battery cyclestate:\t"); | ||
628 | num_states = sizeof(battery_cyclestate) / | ||
629 | sizeof(char *); | ||
630 | if (state < num_states) { | ||
631 | seq_printf(m, "%s\t", | ||
632 | battery_cyclestate[state]); | ||
633 | have_strings = 1; | ||
634 | } | ||
635 | break; | ||
636 | case BATTERY_CHARGING: | ||
637 | seq_printf(m, "Battery Charging:\t"); | ||
638 | num_states = sizeof(battery_charging) / sizeof(char *); | ||
639 | if (state < num_states) { | ||
640 | seq_printf(m, "%s\t", | ||
641 | battery_charging[state]); | ||
642 | have_strings = 1; | ||
643 | } | ||
644 | break; | ||
645 | case IBM_SURVEILLANCE: | ||
646 | seq_printf(m, "Surveillance:\t"); | ||
647 | break; | ||
648 | case IBM_FANRPM: | ||
649 | seq_printf(m, "Fan (rpm):\t"); | ||
650 | break; | ||
651 | case IBM_VOLTAGE: | ||
652 | seq_printf(m, "Voltage (mv):\t"); | ||
653 | break; | ||
654 | case IBM_DRCONNECTOR: | ||
655 | seq_printf(m, "DR connector:\t"); | ||
656 | num_states = sizeof(ibm_drconnector) / sizeof(char *); | ||
657 | if (state < num_states) { | ||
658 | seq_printf(m, "%s\t", | ||
659 | ibm_drconnector[state]); | ||
660 | have_strings = 1; | ||
661 | } | ||
662 | break; | ||
663 | case IBM_POWERSUPPLY: | ||
664 | seq_printf(m, "Powersupply:\t"); | ||
665 | break; | ||
666 | default: | ||
667 | seq_printf(m, "Unknown sensor (type %d), ignoring it\n", | ||
668 | s->token); | ||
669 | unknown = 1; | ||
670 | have_strings = 1; | ||
671 | break; | ||
672 | } | ||
673 | if (have_strings == 0) { | ||
674 | if (temperature) { | ||
675 | seq_printf(m, "%4d /%4d\t", state, cel_to_fahr(state)); | ||
676 | } else | ||
677 | seq_printf(m, "%10d\t", state); | ||
678 | } | ||
679 | if (unknown == 0) { | ||
680 | seq_printf(m, "%s\t", ppc_rtas_process_error(error)); | ||
681 | get_location_code(m, s, loc); | ||
682 | } | ||
683 | } | ||
684 | |||
685 | /* ****************************************************************** */ | ||
686 | |||
687 | static void check_location(struct seq_file *m, char *c) | ||
688 | { | ||
689 | switch (c[0]) { | ||
690 | case LOC_PLANAR: | ||
691 | seq_printf(m, "Planar #%c", c[1]); | ||
692 | break; | ||
693 | case LOC_CPU: | ||
694 | seq_printf(m, "CPU #%c", c[1]); | ||
695 | break; | ||
696 | case LOC_FAN: | ||
697 | seq_printf(m, "Fan #%c", c[1]); | ||
698 | break; | ||
699 | case LOC_RACKMOUNTED: | ||
700 | seq_printf(m, "Rack #%c", c[1]); | ||
701 | break; | ||
702 | case LOC_VOLTAGE: | ||
703 | seq_printf(m, "Voltage #%c", c[1]); | ||
704 | break; | ||
705 | case LOC_LCD: | ||
706 | seq_printf(m, "LCD #%c", c[1]); | ||
707 | break; | ||
708 | case '.': | ||
709 | seq_printf(m, "- %c", c[1]); | ||
710 | break; | ||
711 | default: | ||
712 | seq_printf(m, "Unknown location"); | ||
713 | break; | ||
714 | } | ||
715 | } | ||
716 | |||
717 | |||
718 | /* ****************************************************************** */ | ||
719 | /* | ||
720 | * Format: | ||
721 | * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ] | ||
722 | 	 * the '.' may be an abbreviation | ||
723 | */ | ||
724 | static void check_location_string(struct seq_file *m, char *c) | ||
725 | { | ||
726 | while (*c) { | ||
727 | if (isalpha(*c) || *c == '.') | ||
728 | check_location(m, c); | ||
729 | else if (*c == '/' || *c == '-') | ||
730 | seq_printf(m, " at "); | ||
731 | c++; | ||
732 | } | ||
733 | } | ||
734 | |||
735 | |||
736 | /* ****************************************************************** */ | ||
737 | |||
738 | static void get_location_code(struct seq_file *m, struct individual_sensor *s, char *loc) | ||
739 | { | ||
740 | if (!loc || !*loc) { | ||
741 | seq_printf(m, "---");/* does not have a location */ | ||
742 | } else { | ||
743 | check_location_string(m, loc); | ||
744 | } | ||
745 | seq_putc(m, ' '); | ||
746 | } | ||
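
As a worked illustration of the format comment above check_location_string(): assuming LOC_PLANAR and LOC_CPU expand to the letters 'P' and 'C' (their definitions live in a header outside this hunk), a hypothetical location code "P1-C1" would render as "Planar #1 at CPU #1". A standalone sketch of the same traversal, with that letter mapping stated as an explicit assumption:

/* Standalone mirror of the traversal in check_location_string() and
 * check_location(), illustrative only. The 'P' -> Planar and
 * 'C' -> CPU mapping is an assumption; the kernel keys on LOC_*
 * constants from a header that is not part of this hunk.
 */
#include <stdio.h>

static void show_location(const char *c)
{
	for (; *c; c++) {
		if (*c == 'P')			/* assumed LOC_PLANAR */
			printf("Planar #%c", c[1]);
		else if (*c == 'C')		/* assumed LOC_CPU */
			printf("CPU #%c", c[1]);
		else if (*c == '/' || *c == '-')
			printf(" at ");
	}
	printf("\n");
}

int main(void)
{
	show_location("P1-C1");		/* prints "Planar #1 at CPU #1" */
	return 0;
}
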
747 | /* ****************************************************************** */ | ||
748 | /* INDICATORS - Tone Frequency */ | ||
749 | /* ****************************************************************** */ | ||
750 | static ssize_t ppc_rtas_tone_freq_write(struct file *file, | ||
751 | const char __user *buf, size_t count, loff_t *ppos) | ||
752 | { | ||
753 | unsigned long freq; | ||
754 | int error = parse_number(buf, count, &freq); | ||
755 | if (error) | ||
756 | return error; | ||
757 | |||
758 | rtas_tone_frequency = freq; /* save it for later */ | ||
759 | error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL, | ||
760 | TONE_FREQUENCY, 0, freq); | ||
761 | if (error) | ||
762 | printk(KERN_WARNING "error: setting tone frequency returned: %s\n", | ||
763 | ppc_rtas_process_error(error)); | ||
764 | return count; | ||
765 | } | ||
766 | /* ****************************************************************** */ | ||
767 | static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v) | ||
768 | { | ||
769 | seq_printf(m, "%lu\n", rtas_tone_frequency); | ||
770 | return 0; | ||
771 | } | ||
772 | /* ****************************************************************** */ | ||
773 | /* INDICATORS - Tone Volume */ | ||
774 | /* ****************************************************************** */ | ||
775 | static ssize_t ppc_rtas_tone_volume_write(struct file *file, | ||
776 | const char __user *buf, size_t count, loff_t *ppos) | ||
777 | { | ||
778 | unsigned long volume; | ||
779 | int error = parse_number(buf, count, &volume); | ||
780 | if (error) | ||
781 | return error; | ||
782 | |||
783 | if (volume > 100) | ||
784 | volume = 100; | ||
785 | |||
786 | rtas_tone_volume = volume; /* save it for later */ | ||
787 | error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL, | ||
788 | TONE_VOLUME, 0, volume); | ||
789 | if (error) | ||
790 | printk(KERN_WARNING "error: setting tone volume returned: %s\n", | ||
791 | ppc_rtas_process_error(error)); | ||
792 | return count; | ||
793 | } | ||
794 | /* ****************************************************************** */ | ||
795 | static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v) | ||
796 | { | ||
797 | seq_printf(m, "%lu\n", rtas_tone_volume); | ||
798 | return 0; | ||
799 | } | ||
800 | |||
801 | #define RMO_READ_BUF_MAX 30 | ||
802 | |||
803 | /* RTAS Userspace access */ | ||
804 | static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v) | ||
805 | { | ||
806 | seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX); | ||
807 | return 0; | ||
808 | } | ||
diff --git a/arch/ppc64/kernel/rtas_flash.c b/arch/ppc64/kernel/rtas_flash.c deleted file mode 100644 index 923e2e201a70..000000000000 --- a/arch/ppc64/kernel/rtas_flash.c +++ /dev/null | |||
@@ -1,725 +0,0 @@ | |||
1 | /* | ||
2 | * c 2001 PPC 64 Team, IBM Corp | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * /proc/ppc64/rtas/firmware_flash interface | ||
10 | * | ||
11 | * This file implements a firmware_flash interface to pump a firmware | ||
12 | * image into the kernel. At reboot time rtas_restart() will see the | ||
13 | * firmware image and flash it as it reboots (see rtas.c). | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/proc_fs.h> | ||
19 | #include <asm/delay.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <asm/rtas.h> | ||
22 | |||
23 | #define MODULE_VERS "1.0" | ||
24 | #define MODULE_NAME "rtas_flash" | ||
25 | |||
26 | #define FIRMWARE_FLASH_NAME "firmware_flash" | ||
27 | #define FIRMWARE_UPDATE_NAME "firmware_update" | ||
28 | #define MANAGE_FLASH_NAME "manage_flash" | ||
29 | #define VALIDATE_FLASH_NAME "validate_flash" | ||
30 | |||
31 | /* General RTAS Status Codes */ | ||
32 | #define RTAS_RC_SUCCESS 0 | ||
33 | #define RTAS_RC_HW_ERR -1 | ||
34 | #define RTAS_RC_BUSY -2 | ||
35 | |||
36 | /* Flash image status values */ | ||
37 | #define FLASH_AUTH -9002 /* RTAS Not Service Authority Partition */ | ||
38 | #define FLASH_NO_OP -1099 /* No operation initiated by user */ | ||
39 | #define FLASH_IMG_SHORT -1005 /* Flash image shorter than expected */ | ||
40 | #define FLASH_IMG_BAD_LEN -1004 /* Bad length value in flash list block */ | ||
41 | #define FLASH_IMG_NULL_DATA -1003 /* Bad data value in flash list block */ | ||
42 | #define FLASH_IMG_READY 0 /* Firmware img ready for flash on reboot */ | ||
43 | |||
44 | /* Manage image status values */ | ||
45 | #define MANAGE_AUTH -9002 /* RTAS Not Service Authority Partition */ | ||
46 | #define MANAGE_ACTIVE_ERR -9001 /* RTAS Cannot Overwrite Active Img */ | ||
47 | #define MANAGE_NO_OP -1099 /* No operation initiated by user */ | ||
48 | #define MANAGE_PARAM_ERR -3 /* RTAS Parameter Error */ | ||
49 | #define MANAGE_HW_ERR -1 /* RTAS Hardware Error */ | ||
50 | |||
51 | /* Validate image status values */ | ||
52 | #define VALIDATE_AUTH -9002 /* RTAS Not Service Authority Partition */ | ||
53 | #define VALIDATE_NO_OP -1099 /* No operation initiated by the user */ | ||
54 | #define VALIDATE_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */ | ||
55 | #define VALIDATE_READY -1001 /* Firmware image ready for validation */ | ||
56 | #define VALIDATE_PARAM_ERR -3 /* RTAS Parameter Error */ | ||
57 | #define VALIDATE_HW_ERR -1 /* RTAS Hardware Error */ | ||
58 | #define VALIDATE_TMP_UPDATE 0 /* Validate Return Status */ | ||
59 | #define VALIDATE_FLASH_AUTH 1 /* Validate Return Status */ | ||
60 | #define VALIDATE_INVALID_IMG 2 /* Validate Return Status */ | ||
61 | #define VALIDATE_CUR_UNKNOWN 3 /* Validate Return Status */ | ||
62 | #define VALIDATE_TMP_COMMIT_DL 4 /* Validate Return Status */ | ||
63 | #define VALIDATE_TMP_COMMIT 5 /* Validate Return Status */ | ||
64 | #define VALIDATE_TMP_UPDATE_DL 6 /* Validate Return Status */ | ||
65 | |||
66 | /* ibm,manage-flash-image operation tokens */ | ||
67 | #define RTAS_REJECT_TMP_IMG 0 | ||
68 | #define RTAS_COMMIT_TMP_IMG 1 | ||
69 | |||
70 | /* Array sizes */ | ||
71 | #define VALIDATE_BUF_SIZE 4096 | ||
72 | #define RTAS_MSG_MAXLEN 64 | ||
73 | |||
74 | /* Local copy of the flash block list. | ||
75 | * We only allow one open of the flash proc file and create this | ||
76 | * list as we go. This list will be put in the kernel's | ||
77 | * rtas_firmware_flash_list global var once it is fully read. | ||
78 | * | ||
79 | * For convenience as we build the list we use virtual addrs, | ||
80 | * we do not fill in the version number, and the length field | ||
81 | * is treated as the number of entries currently in the block | ||
82 | * (i.e. not a byte count). This is all fixed on release. | ||
83 | */ | ||
84 | |||
85 | /* Status int must be first member of struct */ | ||
86 | struct rtas_update_flash_t | ||
87 | { | ||
88 | int status; /* Flash update status */ | ||
89 | struct flash_block_list *flist; /* Local copy of flash block list */ | ||
90 | }; | ||
91 | |||
92 | /* Status int must be first member of struct */ | ||
93 | struct rtas_manage_flash_t | ||
94 | { | ||
95 | int status; /* Returned status */ | ||
96 | unsigned int op; /* Reject or commit image */ | ||
97 | }; | ||
98 | |||
99 | /* Status int must be first member of struct */ | ||
100 | struct rtas_validate_flash_t | ||
101 | { | ||
102 | int status; /* Returned status */ | ||
103 | char buf[VALIDATE_BUF_SIZE]; /* Candidate image buffer */ | ||
104 | unsigned int buf_size; /* Size of image buf */ | ||
105 | unsigned int update_results; /* Update results token */ | ||
106 | }; | ||
107 | |||
108 | static DEFINE_SPINLOCK(flash_file_open_lock); | ||
109 | static struct proc_dir_entry *firmware_flash_pde; | ||
110 | static struct proc_dir_entry *firmware_update_pde; | ||
111 | static struct proc_dir_entry *validate_pde; | ||
112 | static struct proc_dir_entry *manage_pde; | ||
113 | |||
114 | /* Do simple sanity checks on the flash image. */ | ||
115 | static int flash_list_valid(struct flash_block_list *flist) | ||
116 | { | ||
117 | struct flash_block_list *f; | ||
118 | int i; | ||
119 | unsigned long block_size, image_size; | ||
120 | |||
121 | /* Paranoid self test here. We also collect the image size. */ | ||
122 | image_size = 0; | ||
123 | for (f = flist; f; f = f->next) { | ||
124 | for (i = 0; i < f->num_blocks; i++) { | ||
125 | if (f->blocks[i].data == NULL) { | ||
126 | return FLASH_IMG_NULL_DATA; | ||
127 | } | ||
128 | block_size = f->blocks[i].length; | ||
129 | if (block_size <= 0 || block_size > PAGE_SIZE) { | ||
130 | return FLASH_IMG_BAD_LEN; | ||
131 | } | ||
132 | image_size += block_size; | ||
133 | } | ||
134 | } | ||
135 | |||
136 | if (image_size < (256 << 10)) { | ||
137 | if (image_size < 2) | ||
138 | return FLASH_NO_OP; | ||
139 | } | ||
140 | |||
141 | printk(KERN_INFO "FLASH: flash image with %ld bytes stored for hardware flash on reboot\n", image_size); | ||
142 | |||
143 | return FLASH_IMG_READY; | ||
144 | } | ||
145 | |||
146 | static void free_flash_list(struct flash_block_list *f) | ||
147 | { | ||
148 | struct flash_block_list *next; | ||
149 | int i; | ||
150 | |||
151 | while (f) { | ||
152 | for (i = 0; i < f->num_blocks; i++) | ||
153 | free_page((unsigned long)(f->blocks[i].data)); | ||
154 | next = f->next; | ||
155 | free_page((unsigned long)f); | ||
156 | f = next; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | static int rtas_flash_release(struct inode *inode, struct file *file) | ||
161 | { | ||
162 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
163 | struct rtas_update_flash_t *uf; | ||
164 | |||
165 | uf = (struct rtas_update_flash_t *) dp->data; | ||
166 | if (uf->flist) { | ||
167 | /* File was opened in write mode for a new flash attempt */ | ||
168 | /* Clear saved list */ | ||
169 | if (rtas_firmware_flash_list.next) { | ||
170 | free_flash_list(rtas_firmware_flash_list.next); | ||
171 | rtas_firmware_flash_list.next = NULL; | ||
172 | } | ||
173 | |||
174 | if (uf->status != FLASH_AUTH) | ||
175 | uf->status = flash_list_valid(uf->flist); | ||
176 | |||
177 | if (uf->status == FLASH_IMG_READY) | ||
178 | rtas_firmware_flash_list.next = uf->flist; | ||
179 | else | ||
180 | free_flash_list(uf->flist); | ||
181 | |||
182 | uf->flist = NULL; | ||
183 | } | ||
184 | |||
185 | atomic_dec(&dp->count); | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static void get_flash_status_msg(int status, char *buf) | ||
190 | { | ||
191 | char *msg; | ||
192 | |||
193 | switch (status) { | ||
194 | case FLASH_AUTH: | ||
195 | msg = "error: this partition does not have service authority\n"; | ||
196 | break; | ||
197 | case FLASH_NO_OP: | ||
198 | msg = "info: no firmware image for flash\n"; | ||
199 | break; | ||
200 | case FLASH_IMG_SHORT: | ||
201 | msg = "error: flash image short\n"; | ||
202 | break; | ||
203 | case FLASH_IMG_BAD_LEN: | ||
204 | msg = "error: internal error bad length\n"; | ||
205 | break; | ||
206 | case FLASH_IMG_NULL_DATA: | ||
207 | msg = "error: internal error null data\n"; | ||
208 | break; | ||
209 | case FLASH_IMG_READY: | ||
210 | msg = "ready: firmware image ready for flash on reboot\n"; | ||
211 | break; | ||
212 | default: | ||
213 | sprintf(buf, "error: unexpected status value %d\n", status); | ||
214 | return; | ||
215 | } | ||
216 | |||
217 | strcpy(buf, msg); | ||
218 | } | ||
219 | |||
220 | /* Reading the proc file will show status (not the firmware contents) */ | ||
221 | static ssize_t rtas_flash_read(struct file *file, char __user *buf, | ||
222 | size_t count, loff_t *ppos) | ||
223 | { | ||
224 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
225 | struct rtas_update_flash_t *uf; | ||
226 | char msg[RTAS_MSG_MAXLEN]; | ||
227 | int msglen; | ||
228 | |||
229 | uf = (struct rtas_update_flash_t *) dp->data; | ||
230 | |||
231 | if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) { | ||
232 | get_flash_status_msg(uf->status, msg); | ||
233 | } else { /* FIRMWARE_UPDATE_NAME */ | ||
234 | sprintf(msg, "%d\n", uf->status); | ||
235 | } | ||
236 | msglen = strlen(msg); | ||
237 | if (msglen > count) | ||
238 | msglen = count; | ||
239 | |||
240 | if (ppos && *ppos != 0) | ||
241 | return 0; /* be cheap */ | ||
242 | |||
243 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
244 | return -EINVAL; | ||
245 | |||
246 | if (copy_to_user(buf, msg, msglen)) | ||
247 | return -EFAULT; | ||
248 | |||
249 | if (ppos) | ||
250 | *ppos = msglen; | ||
251 | return msglen; | ||
252 | } | ||
253 | |||
254 | /* We could be much more efficient here. But to keep this function | ||
255 | * simple we allocate a page to the block list no matter how small the | ||
256 | * count is. If the system is low on memory it will be just as well | ||
257 | * that we fail.... | ||
258 | */ | ||
259 | static ssize_t rtas_flash_write(struct file *file, const char __user *buffer, | ||
260 | size_t count, loff_t *off) | ||
261 | { | ||
262 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
263 | struct rtas_update_flash_t *uf; | ||
264 | char *p; | ||
265 | int next_free; | ||
266 | struct flash_block_list *fl; | ||
267 | |||
268 | uf = (struct rtas_update_flash_t *) dp->data; | ||
269 | |||
270 | if (uf->status == FLASH_AUTH || count == 0) | ||
271 | return count; /* discard data */ | ||
272 | |||
273 | /* In the case that the image is not ready for flashing, the memory | ||
274 | * allocated for the block list will be freed upon the release of the | ||
275 | * proc file | ||
276 | */ | ||
277 | if (uf->flist == NULL) { | ||
278 | uf->flist = (struct flash_block_list *) get_zeroed_page(GFP_KERNEL); | ||
279 | if (!uf->flist) | ||
280 | return -ENOMEM; | ||
281 | } | ||
282 | |||
283 | fl = uf->flist; | ||
284 | while (fl->next) | ||
285 | fl = fl->next; /* seek to last block_list for append */ | ||
286 | next_free = fl->num_blocks; | ||
287 | if (next_free == FLASH_BLOCKS_PER_NODE) { | ||
288 | /* Need to allocate another block_list */ | ||
289 | fl->next = (struct flash_block_list *)get_zeroed_page(GFP_KERNEL); | ||
290 | if (!fl->next) | ||
291 | return -ENOMEM; | ||
292 | fl = fl->next; | ||
293 | next_free = 0; | ||
294 | } | ||
295 | |||
296 | if (count > PAGE_SIZE) | ||
297 | count = PAGE_SIZE; | ||
298 | p = (char *)get_zeroed_page(GFP_KERNEL); | ||
299 | if (!p) | ||
300 | return -ENOMEM; | ||
301 | |||
302 | if(copy_from_user(p, buffer, count)) { | ||
303 | free_page((unsigned long)p); | ||
304 | return -EFAULT; | ||
305 | } | ||
306 | fl->blocks[next_free].data = p; | ||
307 | fl->blocks[next_free].length = count; | ||
308 | fl->num_blocks++; | ||
309 | |||
310 | return count; | ||
311 | } | ||
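
The write path above only buffers the image: userspace streams the firmware file into the proc entry in chunks, and the block list is checked when the file is closed (rtas_flash_release() above); the flash itself happens at the next reboot, as the file header says. A hedged userspace sketch — the image path is hypothetical, the proc path matches FIRMWARE_FLASH_NAME registered in rtas_flash_init() below:

/* Illustrative only: stream a firmware image into firmware_flash.
 * /tmp/firmware.img is a made-up path; the kernel caps each write at
 * PAGE_SIZE, so page-sized chunks are a natural choice here.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int img = open("/tmp/firmware.img", O_RDONLY);	/* hypothetical */
	int dst = open("/proc/ppc64/rtas/firmware_flash", O_WRONLY);

	if (img < 0 || dst < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(img, buf, sizeof(buf))) > 0) {
		if (write(dst, buf, n) != n) {
			perror("write");
			return 1;
		}
	}
	close(img);
	close(dst);	/* release() validates the buffered block list */
	return 0;
}
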
312 | |||
313 | static int rtas_excl_open(struct inode *inode, struct file *file) | ||
314 | { | ||
315 | struct proc_dir_entry *dp = PDE(inode); | ||
316 | |||
317 | /* Enforce exclusive open with use count of PDE */ | ||
318 | spin_lock(&flash_file_open_lock); | ||
319 | if (atomic_read(&dp->count) > 1) { | ||
320 | spin_unlock(&flash_file_open_lock); | ||
321 | return -EBUSY; | ||
322 | } | ||
323 | |||
324 | atomic_inc(&dp->count); | ||
325 | spin_unlock(&flash_file_open_lock); | ||
326 | |||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | static int rtas_excl_release(struct inode *inode, struct file *file) | ||
331 | { | ||
332 | struct proc_dir_entry *dp = PDE(inode); | ||
333 | |||
334 | atomic_dec(&dp->count); | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static void manage_flash(struct rtas_manage_flash_t *args_buf) | ||
340 | { | ||
341 | unsigned int wait_time; | ||
342 | s32 rc; | ||
343 | |||
344 | while (1) { | ||
345 | rc = rtas_call(rtas_token("ibm,manage-flash-image"), 1, | ||
346 | 1, NULL, args_buf->op); | ||
347 | if (rc == RTAS_RC_BUSY) | ||
348 | udelay(1); | ||
349 | else if (rtas_is_extended_busy(rc)) { | ||
350 | wait_time = rtas_extended_busy_delay_time(rc); | ||
351 | udelay(wait_time * 1000); | ||
352 | } else | ||
353 | break; | ||
354 | } | ||
355 | |||
356 | args_buf->status = rc; | ||
357 | } | ||
358 | |||
359 | static ssize_t manage_flash_read(struct file *file, char __user *buf, | ||
360 | size_t count, loff_t *ppos) | ||
361 | { | ||
362 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
363 | struct rtas_manage_flash_t *args_buf; | ||
364 | char msg[RTAS_MSG_MAXLEN]; | ||
365 | int msglen; | ||
366 | |||
367 | args_buf = (struct rtas_manage_flash_t *) dp->data; | ||
368 | if (args_buf == NULL) | ||
369 | return 0; | ||
370 | |||
371 | msglen = sprintf(msg, "%d\n", args_buf->status); | ||
372 | if (msglen > count) | ||
373 | msglen = count; | ||
374 | |||
375 | if (ppos && *ppos != 0) | ||
376 | return 0; /* be cheap */ | ||
377 | |||
378 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
379 | return -EINVAL; | ||
380 | |||
381 | if (copy_to_user(buf, msg, msglen)) | ||
382 | return -EFAULT; | ||
383 | |||
384 | if (ppos) | ||
385 | *ppos = msglen; | ||
386 | return msglen; | ||
387 | } | ||
388 | |||
389 | static ssize_t manage_flash_write(struct file *file, const char __user *buf, | ||
390 | size_t count, loff_t *off) | ||
391 | { | ||
392 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
393 | struct rtas_manage_flash_t *args_buf; | ||
394 | const char reject_str[] = "0"; | ||
395 | const char commit_str[] = "1"; | ||
396 | char stkbuf[10]; | ||
397 | int op; | ||
398 | |||
399 | args_buf = (struct rtas_manage_flash_t *) dp->data; | ||
400 | if ((args_buf->status == MANAGE_AUTH) || (count == 0)) | ||
401 | return count; | ||
402 | |||
403 | op = -1; | ||
404 | if (buf) { | ||
405 | if (count > 9) count = 9; | ||
406 | if (copy_from_user (stkbuf, buf, count)) { | ||
407 | return -EFAULT; | ||
408 | } | ||
409 | if (strncmp(stkbuf, reject_str, strlen(reject_str)) == 0) | ||
410 | op = RTAS_REJECT_TMP_IMG; | ||
411 | else if (strncmp(stkbuf, commit_str, strlen(commit_str)) == 0) | ||
412 | op = RTAS_COMMIT_TMP_IMG; | ||
413 | } | ||
414 | |||
415 | if (op == -1) /* buf is empty, or contains invalid string */ | ||
416 | return -EINVAL; | ||
417 | |||
418 | args_buf->op = op; | ||
419 | manage_flash(args_buf); | ||
420 | |||
421 | return count; | ||
422 | } | ||
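
manage_flash_write() recognizes exactly two inputs, "0" (RTAS_REJECT_TMP_IMG) and "1" (RTAS_COMMIT_TMP_IMG); anything else returns -EINVAL. A minimal sketch of committing the temporary image from userspace, assuming the manage_flash entry created in rtas_flash_init() below:

/* Illustrative only: commit the temporary firmware image by writing
 * "1" to manage_flash; writing "0" would reject it instead.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/ppc64/rtas/manage_flash", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
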
423 | |||
424 | static void validate_flash(struct rtas_validate_flash_t *args_buf) | ||
425 | { | ||
426 | int token = rtas_token("ibm,validate-flash-image"); | ||
427 | unsigned int wait_time; | ||
428 | int update_results; | ||
429 | s32 rc; | ||
430 | |||
431 | rc = 0; | ||
432 | while(1) { | ||
433 | spin_lock(&rtas_data_buf_lock); | ||
434 | memcpy(rtas_data_buf, args_buf->buf, VALIDATE_BUF_SIZE); | ||
435 | rc = rtas_call(token, 2, 2, &update_results, | ||
436 | (u32) __pa(rtas_data_buf), args_buf->buf_size); | ||
437 | memcpy(args_buf->buf, rtas_data_buf, VALIDATE_BUF_SIZE); | ||
438 | spin_unlock(&rtas_data_buf_lock); | ||
439 | |||
440 | if (rc == RTAS_RC_BUSY) | ||
441 | udelay(1); | ||
442 | else if (rtas_is_extended_busy(rc)) { | ||
443 | wait_time = rtas_extended_busy_delay_time(rc); | ||
444 | udelay(wait_time * 1000); | ||
445 | } else | ||
446 | break; | ||
447 | } | ||
448 | |||
449 | args_buf->status = rc; | ||
450 | args_buf->update_results = update_results; | ||
451 | } | ||
452 | |||
453 | static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, | ||
454 | char *msg) | ||
455 | { | ||
456 | int n; | ||
457 | |||
458 | if (args_buf->status >= VALIDATE_TMP_UPDATE) { | ||
459 | n = sprintf(msg, "%d\n", args_buf->update_results); | ||
460 | if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || | ||
461 | (args_buf->update_results == VALIDATE_TMP_UPDATE)) | ||
462 | n += sprintf(msg + n, "%s\n", args_buf->buf); | ||
463 | } else { | ||
464 | n = sprintf(msg, "%d\n", args_buf->status); | ||
465 | } | ||
466 | return n; | ||
467 | } | ||
468 | |||
469 | static ssize_t validate_flash_read(struct file *file, char __user *buf, | ||
470 | size_t count, loff_t *ppos) | ||
471 | { | ||
472 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
473 | struct rtas_validate_flash_t *args_buf; | ||
474 | char msg[RTAS_MSG_MAXLEN]; | ||
475 | int msglen; | ||
476 | |||
477 | args_buf = (struct rtas_validate_flash_t *) dp->data; | ||
478 | |||
479 | if (ppos && *ppos != 0) | ||
480 | return 0; /* be cheap */ | ||
481 | |||
482 | msglen = get_validate_flash_msg(args_buf, msg); | ||
483 | if (msglen > count) | ||
484 | msglen = count; | ||
485 | |||
486 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
487 | return -EINVAL; | ||
488 | |||
489 | if (copy_to_user(buf, msg, msglen)) | ||
490 | return -EFAULT; | ||
491 | |||
492 | if (ppos) | ||
493 | *ppos = msglen; | ||
494 | return msglen; | ||
495 | } | ||
496 | |||
497 | static ssize_t validate_flash_write(struct file *file, const char __user *buf, | ||
498 | size_t count, loff_t *off) | ||
499 | { | ||
500 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
501 | struct rtas_validate_flash_t *args_buf; | ||
502 | int rc; | ||
503 | |||
504 | args_buf = (struct rtas_validate_flash_t *) dp->data; | ||
505 | |||
506 | if (dp->data == NULL) { | ||
507 | dp->data = kmalloc(sizeof(struct rtas_validate_flash_t), | ||
508 | GFP_KERNEL); | ||
509 | if (dp->data == NULL) | ||
510 | return -ENOMEM; | ||
511 | } | ||
512 | |||
513 | /* We are only interested in the first 4K of the | ||
514 | * candidate image */ | ||
515 | if ((*off >= VALIDATE_BUF_SIZE) || | ||
516 | (args_buf->status == VALIDATE_AUTH)) { | ||
517 | *off += count; | ||
518 | return count; | ||
519 | } | ||
520 | |||
521 | if (*off + count >= VALIDATE_BUF_SIZE) { | ||
522 | count = VALIDATE_BUF_SIZE - *off; | ||
523 | args_buf->status = VALIDATE_READY; | ||
524 | } else { | ||
525 | args_buf->status = VALIDATE_INCOMPLETE; | ||
526 | } | ||
527 | |||
528 | if (!access_ok(VERIFY_READ, buf, count)) { | ||
529 | rc = -EFAULT; | ||
530 | goto done; | ||
531 | } | ||
532 | if (copy_from_user(args_buf->buf + *off, buf, count)) { | ||
533 | rc = -EFAULT; | ||
534 | goto done; | ||
535 | } | ||
536 | |||
537 | *off += count; | ||
538 | rc = count; | ||
539 | done: | ||
540 | if (rc < 0) { | ||
541 | kfree(dp->data); | ||
542 | dp->data = NULL; | ||
543 | } | ||
544 | return rc; | ||
545 | } | ||
546 | |||
547 | static int validate_flash_release(struct inode *inode, struct file *file) | ||
548 | { | ||
549 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
550 | struct rtas_validate_flash_t *args_buf; | ||
551 | |||
552 | args_buf = (struct rtas_validate_flash_t *) dp->data; | ||
553 | |||
554 | if (args_buf->status == VALIDATE_READY) { | ||
555 | args_buf->buf_size = VALIDATE_BUF_SIZE; | ||
556 | validate_flash(args_buf); | ||
557 | } | ||
558 | |||
559 | /* The matching atomic_inc was in rtas_excl_open() */ | ||
560 | atomic_dec(&dp->count); | ||
561 | |||
562 | return 0; | ||
563 | } | ||
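
Taken together, validate_flash_write() and validate_flash_release() give the validation flow its shape: userspace writes up to the first VALIDATE_BUF_SIZE (4 KB) bytes of a candidate image, the close triggers the ibm,validate-flash-image call, and a subsequent read returns the update_results token plus any version string. A hedged sketch of that sequence (the image path is hypothetical):

/* Illustrative only: validate a candidate image. Close triggers the
 * RTAS call; the result is read back on a second open.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int img = open("/tmp/firmware.img", O_RDONLY);	/* hypothetical */
	int fd = open("/proc/ppc64/rtas/validate_flash", O_WRONLY);

	if (img < 0 || fd < 0) {
		perror("open");
		return 1;
	}
	n = read(img, buf, sizeof(buf));	/* first 4 KB is enough */
	if (n > 0 && write(fd, buf, n) != n)
		perror("write");
	close(img);
	close(fd);	/* release() runs ibm,validate-flash-image */

	fd = open("/proc/ppc64/rtas/validate_flash", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("%s", buf);	/* "<results>\n[version]" */
		}
		close(fd);
	}
	return 0;
}
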
564 | |||
565 | static void remove_flash_pde(struct proc_dir_entry *dp) | ||
566 | { | ||
567 | if (dp) { | ||
568 | if (dp->data != NULL) | ||
569 | kfree(dp->data); | ||
570 | dp->owner = NULL; | ||
571 | remove_proc_entry(dp->name, dp->parent); | ||
572 | } | ||
573 | } | ||
574 | |||
575 | static int initialize_flash_pde_data(const char *rtas_call_name, | ||
576 | size_t buf_size, | ||
577 | struct proc_dir_entry *dp) | ||
578 | { | ||
579 | int *status; | ||
580 | int token; | ||
581 | |||
582 | dp->data = kmalloc(buf_size, GFP_KERNEL); | ||
583 | if (dp->data == NULL) { | ||
584 | remove_flash_pde(dp); | ||
585 | return -ENOMEM; | ||
586 | } | ||
587 | |||
588 | memset(dp->data, 0, buf_size); | ||
589 | |||
590 | /* | ||
591 | * This code assumes that the status int is the first member of the | ||
592 | * struct | ||
593 | */ | ||
594 | status = (int *) dp->data; | ||
595 | token = rtas_token(rtas_call_name); | ||
596 | if (token == RTAS_UNKNOWN_SERVICE) | ||
597 | *status = FLASH_AUTH; | ||
598 | else | ||
599 | *status = FLASH_NO_OP; | ||
600 | |||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | static struct proc_dir_entry *create_flash_pde(const char *filename, | ||
605 | struct file_operations *fops) | ||
606 | { | ||
607 | struct proc_dir_entry *ent = NULL; | ||
608 | |||
609 | ent = create_proc_entry(filename, S_IRUSR | S_IWUSR, NULL); | ||
610 | if (ent != NULL) { | ||
611 | ent->nlink = 1; | ||
612 | ent->proc_fops = fops; | ||
613 | ent->owner = THIS_MODULE; | ||
614 | } | ||
615 | |||
616 | return ent; | ||
617 | } | ||
618 | |||
619 | static struct file_operations rtas_flash_operations = { | ||
620 | .read = rtas_flash_read, | ||
621 | .write = rtas_flash_write, | ||
622 | .open = rtas_excl_open, | ||
623 | .release = rtas_flash_release, | ||
624 | }; | ||
625 | |||
626 | static struct file_operations manage_flash_operations = { | ||
627 | .read = manage_flash_read, | ||
628 | .write = manage_flash_write, | ||
629 | .open = rtas_excl_open, | ||
630 | .release = rtas_excl_release, | ||
631 | }; | ||
632 | |||
633 | static struct file_operations validate_flash_operations = { | ||
634 | .read = validate_flash_read, | ||
635 | .write = validate_flash_write, | ||
636 | .open = rtas_excl_open, | ||
637 | .release = validate_flash_release, | ||
638 | }; | ||
639 | |||
640 | int __init rtas_flash_init(void) | ||
641 | { | ||
642 | int rc; | ||
643 | |||
644 | if (rtas_token("ibm,update-flash-64-and-reboot") == | ||
645 | RTAS_UNKNOWN_SERVICE) { | ||
646 | printk(KERN_ERR "rtas_flash: no firmware flash support\n"); | ||
647 | return 1; | ||
648 | } | ||
649 | |||
650 | firmware_flash_pde = create_flash_pde("ppc64/rtas/" | ||
651 | FIRMWARE_FLASH_NAME, | ||
652 | &rtas_flash_operations); | ||
653 | if (firmware_flash_pde == NULL) { | ||
654 | rc = -ENOMEM; | ||
655 | goto cleanup; | ||
656 | } | ||
657 | |||
658 | rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot", | ||
659 | sizeof(struct rtas_update_flash_t), | ||
660 | firmware_flash_pde); | ||
661 | if (rc != 0) | ||
662 | goto cleanup; | ||
663 | |||
664 | firmware_update_pde = create_flash_pde("ppc64/rtas/" | ||
665 | FIRMWARE_UPDATE_NAME, | ||
666 | &rtas_flash_operations); | ||
667 | if (firmware_update_pde == NULL) { | ||
668 | rc = -ENOMEM; | ||
669 | goto cleanup; | ||
670 | } | ||
671 | |||
672 | rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot", | ||
673 | sizeof(struct rtas_update_flash_t), | ||
674 | firmware_update_pde); | ||
675 | if (rc != 0) | ||
676 | goto cleanup; | ||
677 | |||
678 | validate_pde = create_flash_pde("ppc64/rtas/" VALIDATE_FLASH_NAME, | ||
679 | &validate_flash_operations); | ||
680 | if (validate_pde == NULL) { | ||
681 | rc = -ENOMEM; | ||
682 | goto cleanup; | ||
683 | } | ||
684 | |||
685 | rc = initialize_flash_pde_data("ibm,validate-flash-image", | ||
686 | sizeof(struct rtas_validate_flash_t), | ||
687 | validate_pde); | ||
688 | if (rc != 0) | ||
689 | goto cleanup; | ||
690 | |||
691 | manage_pde = create_flash_pde("ppc64/rtas/" MANAGE_FLASH_NAME, | ||
692 | &manage_flash_operations); | ||
693 | if (manage_pde == NULL) { | ||
694 | rc = -ENOMEM; | ||
695 | goto cleanup; | ||
696 | } | ||
697 | |||
698 | rc = initialize_flash_pde_data("ibm,manage-flash-image", | ||
699 | sizeof(struct rtas_manage_flash_t), | ||
700 | manage_pde); | ||
701 | if (rc != 0) | ||
702 | goto cleanup; | ||
703 | |||
704 | return 0; | ||
705 | |||
706 | cleanup: | ||
707 | remove_flash_pde(firmware_flash_pde); | ||
708 | remove_flash_pde(firmware_update_pde); | ||
709 | remove_flash_pde(validate_pde); | ||
710 | remove_flash_pde(manage_pde); | ||
711 | |||
712 | return rc; | ||
713 | } | ||
714 | |||
715 | void __exit rtas_flash_cleanup(void) | ||
716 | { | ||
717 | remove_flash_pde(firmware_flash_pde); | ||
718 | remove_flash_pde(firmware_update_pde); | ||
719 | remove_flash_pde(validate_pde); | ||
720 | remove_flash_pde(manage_pde); | ||
721 | } | ||
722 | |||
723 | module_init(rtas_flash_init); | ||
724 | module_exit(rtas_flash_cleanup); | ||
725 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/ppc64/kernel/rtasd.c b/arch/ppc64/kernel/rtasd.c deleted file mode 100644 index e26b0420b6dd..000000000000 --- a/arch/ppc64/kernel/rtasd.c +++ /dev/null | |||
@@ -1,527 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * Communication to userspace based on kernel/printk.c | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/poll.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/vmalloc.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/cpu.h> | ||
22 | #include <linux/delay.h> | ||
23 | |||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/rtas.h> | ||
27 | #include <asm/prom.h> | ||
28 | #include <asm/nvram.h> | ||
29 | #include <asm/atomic.h> | ||
30 | #include <asm/systemcfg.h> | ||
31 | |||
32 | #if 0 | ||
33 | #define DEBUG(A...) printk(KERN_ERR A) | ||
34 | #else | ||
35 | #define DEBUG(A...) | ||
36 | #endif | ||
37 | |||
38 | static DEFINE_SPINLOCK(rtasd_log_lock); | ||
39 | |||
40 | DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait); | ||
41 | |||
42 | static char *rtas_log_buf; | ||
43 | static unsigned long rtas_log_start; | ||
44 | static unsigned long rtas_log_size; | ||
45 | |||
46 | static int surveillance_timeout = -1; | ||
47 | static unsigned int rtas_event_scan_rate; | ||
48 | static unsigned int rtas_error_log_max; | ||
49 | static unsigned int rtas_error_log_buffer_max; | ||
50 | |||
51 | static int full_rtas_msgs = 0; | ||
52 | |||
53 | extern int no_logging; | ||
54 | |||
55 | volatile int error_log_cnt = 0; | ||
56 | |||
57 | /* | ||
58 | * Since we use 32 bit RTAS, the physical address of this must be below | ||
59 | * 4G or else bad things happen. Allocate this in the kernel data and | ||
60 | * make it big enough. | ||
61 | */ | ||
62 | static unsigned char logdata[RTAS_ERROR_LOG_MAX]; | ||
63 | |||
64 | static int get_eventscan_parms(void); | ||
65 | |||
66 | static char *rtas_type[] = { | ||
67 | "Unknown", "Retry", "TCE Error", "Internal Device Failure", | ||
68 | "Timeout", "Data Parity", "Address Parity", "Cache Parity", | ||
69 | "Address Invalid", "ECC Uncorrected", "ECC Corrupted", | ||
70 | }; | ||
71 | |||
72 | static char *rtas_event_type(int type) | ||
73 | { | ||
74 | if ((type > 0) && (type < 11)) | ||
75 | return rtas_type[type]; | ||
76 | |||
77 | switch (type) { | ||
78 | case RTAS_TYPE_EPOW: | ||
79 | return "EPOW"; | ||
80 | case RTAS_TYPE_PLATFORM: | ||
81 | return "Platform Error"; | ||
82 | case RTAS_TYPE_IO: | ||
83 | return "I/O Event"; | ||
84 | case RTAS_TYPE_INFO: | ||
85 | return "Platform Information Event"; | ||
86 | case RTAS_TYPE_DEALLOC: | ||
87 | return "Resource Deallocation Event"; | ||
88 | case RTAS_TYPE_DUMP: | ||
89 | return "Dump Notification Event"; | ||
90 | } | ||
91 | |||
92 | return rtas_type[0]; | ||
93 | } | ||
94 | |||
95 | /* To see this info, grep RTAS /var/log/messages and each entry | ||
96 | * will be collected together with obvious begin/end. | ||
97 | * There will be a unique identifier on the begin and end lines. | ||
98 | * This will persist across reboots. | ||
99 | * | ||
100 | * format of error logs returned from RTAS: | ||
101 | * bytes (size) : contents | ||
102 | * -------------------------------------------------------- | ||
103 | * 0-7 (8) : rtas_error_log | ||
104 | * 8-47 (40) : extended info | ||
105 | * 48-51 (4) : vendor id | ||
106 | * 52-1023 (vendor specific) : location code and debug data | ||
107 | */ | ||
108 | static void printk_log_rtas(char *buf, int len) | ||
109 | { | ||
110 | |||
111 | int i,j,n = 0; | ||
112 | int perline = 16; | ||
113 | char buffer[64]; | ||
114 | char * str = "RTAS event"; | ||
115 | |||
116 | if (full_rtas_msgs) { | ||
117 | printk(RTAS_DEBUG "%d -------- %s begin --------\n", | ||
118 | error_log_cnt, str); | ||
119 | |||
120 | /* | ||
121 | * Print perline bytes on each line, each line will start | ||
122 | * with RTAS and a changing number, so syslogd will | ||
123 | * print lines that are otherwise the same. Separate every | ||
124 | * 4 bytes with a space. | ||
125 | */ | ||
126 | for (i = 0; i < len; i++) { | ||
127 | j = i % perline; | ||
128 | if (j == 0) { | ||
129 | memset(buffer, 0, sizeof(buffer)); | ||
130 | n = sprintf(buffer, "RTAS %d:", i/perline); | ||
131 | } | ||
132 | |||
133 | if ((i % 4) == 0) | ||
134 | n += sprintf(buffer+n, " "); | ||
135 | |||
136 | n += sprintf(buffer+n, "%02x", (unsigned char)buf[i]); | ||
137 | |||
138 | if (j == (perline-1)) | ||
139 | printk(KERN_DEBUG "%s\n", buffer); | ||
140 | } | ||
141 | if ((i % perline) != 0) | ||
142 | printk(KERN_DEBUG "%s\n", buffer); | ||
143 | |||
144 | printk(RTAS_DEBUG "%d -------- %s end ----------\n", | ||
145 | error_log_cnt, str); | ||
146 | } else { | ||
147 | struct rtas_error_log *errlog = (struct rtas_error_log *)buf; | ||
148 | |||
149 | printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n", | ||
150 | error_log_cnt, rtas_event_type(errlog->type), | ||
151 | errlog->severity); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | static int log_rtas_len(char * buf) | ||
156 | { | ||
157 | int len; | ||
158 | struct rtas_error_log *err; | ||
159 | |||
160 | /* rtas fixed header */ | ||
161 | len = 8; | ||
162 | err = (struct rtas_error_log *)buf; | ||
163 | if (err->extended_log_length) { | ||
164 | |||
165 | /* extended header */ | ||
166 | len += err->extended_log_length; | ||
167 | } | ||
168 | |||
169 | if (rtas_error_log_max == 0) { | ||
170 | get_eventscan_parms(); | ||
171 | } | ||
172 | if (len > rtas_error_log_max) | ||
173 | len = rtas_error_log_max; | ||
174 | |||
175 | return len; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * First write to nvram, if fatal error, that is the only | ||
180 | * place we log the info. The error will be picked up | ||
181 | * on the next reboot by rtasd. If not fatal, run the | ||
182 | * method for the type of error. Currently, only RTAS | ||
183 | * errors have methods implemented, but in the future | ||
184 | * there might be a need to store data in nvram before a | ||
185 | * call to panic(). | ||
186 | * | ||
187 | * XXX We write to nvram periodically, to indicate error has | ||
188 | * been written and sync'd, but there is a possibility | ||
189 | * that if we don't shutdown correctly, a duplicate error | ||
190 | * record will be created on next reboot. | ||
191 | */ | ||
192 | void pSeries_log_error(char *buf, unsigned int err_type, int fatal) | ||
193 | { | ||
194 | unsigned long offset; | ||
195 | unsigned long s; | ||
196 | int len = 0; | ||
197 | |||
198 | DEBUG("logging event\n"); | ||
199 | if (buf == NULL) | ||
200 | return; | ||
201 | |||
202 | spin_lock_irqsave(&rtasd_log_lock, s); | ||
203 | |||
204 | /* get length and increase count */ | ||
205 | switch (err_type & ERR_TYPE_MASK) { | ||
206 | case ERR_TYPE_RTAS_LOG: | ||
207 | len = log_rtas_len(buf); | ||
208 | if (!(err_type & ERR_FLAG_BOOT)) | ||
209 | error_log_cnt++; | ||
210 | break; | ||
211 | case ERR_TYPE_KERNEL_PANIC: | ||
212 | default: | ||
213 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
214 | return; | ||
215 | } | ||
216 | |||
217 | /* Write error to NVRAM */ | ||
218 | if (!no_logging && !(err_type & ERR_FLAG_BOOT)) | ||
219 | nvram_write_error_log(buf, len, err_type); | ||
220 | |||
221 | /* | ||
222 | * rtas errors can occur during boot, and we do want to capture | ||
223 | * those somewhere, even if nvram isn't ready (why not?), and even | ||
224 | * if rtasd isn't ready. Put them into the boot log, at least. | ||
225 | */ | ||
226 | if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG) | ||
227 | printk_log_rtas(buf, len); | ||
228 | |||
229 | /* Check to see if we need to or have stopped logging */ | ||
230 | if (fatal || no_logging) { | ||
231 | no_logging = 1; | ||
232 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
233 | return; | ||
234 | } | ||
235 | |||
236 | /* call type specific method for error */ | ||
237 | switch (err_type & ERR_TYPE_MASK) { | ||
238 | case ERR_TYPE_RTAS_LOG: | ||
239 | offset = rtas_error_log_buffer_max * | ||
240 | ((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK); | ||
241 | |||
242 | /* First copy over sequence number */ | ||
243 | memcpy(&rtas_log_buf[offset], (void *) &error_log_cnt, sizeof(int)); | ||
244 | |||
245 | /* Second copy over error log data */ | ||
246 | offset += sizeof(int); | ||
247 | memcpy(&rtas_log_buf[offset], buf, len); | ||
248 | |||
249 | if (rtas_log_size < LOG_NUMBER) | ||
250 | rtas_log_size += 1; | ||
251 | else | ||
252 | rtas_log_start += 1; | ||
253 | |||
254 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
255 | wake_up_interruptible(&rtas_log_wait); | ||
256 | break; | ||
257 | case ERR_TYPE_KERNEL_PANIC: | ||
258 | default: | ||
259 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
260 | return; | ||
261 | } | ||
262 | |||
263 | } | ||
264 | |||
265 | |||
266 | static int rtas_log_open(struct inode * inode, struct file * file) | ||
267 | { | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | static int rtas_log_release(struct inode * inode, struct file * file) | ||
272 | { | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | /* This will check if all events are logged; if they are, then we | ||
277 | * know that we can safely clear the events in NVRAM. | ||
278 | * Next we'll sit and wait for something else to log. | ||
279 | */ | ||
280 | static ssize_t rtas_log_read(struct file * file, char __user * buf, | ||
281 | size_t count, loff_t *ppos) | ||
282 | { | ||
283 | int error; | ||
284 | char *tmp; | ||
285 | unsigned long s; | ||
286 | unsigned long offset; | ||
287 | |||
288 | if (!buf || count < rtas_error_log_buffer_max) | ||
289 | return -EINVAL; | ||
290 | |||
291 | count = rtas_error_log_buffer_max; | ||
292 | |||
293 | if (!access_ok(VERIFY_WRITE, buf, count)) | ||
294 | return -EFAULT; | ||
295 | |||
296 | tmp = kmalloc(count, GFP_KERNEL); | ||
297 | if (!tmp) | ||
298 | return -ENOMEM; | ||
299 | |||
300 | |||
301 | spin_lock_irqsave(&rtasd_log_lock, s); | ||
302 | /* if it's 0, then we know we got the last one (the one in NVRAM) */ | ||
303 | if (rtas_log_size == 0 && !no_logging) | ||
304 | nvram_clear_error_log(); | ||
305 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
306 | |||
307 | |||
308 | error = wait_event_interruptible(rtas_log_wait, rtas_log_size); | ||
309 | if (error) | ||
310 | goto out; | ||
311 | |||
312 | spin_lock_irqsave(&rtasd_log_lock, s); | ||
313 | offset = rtas_error_log_buffer_max * (rtas_log_start & LOG_NUMBER_MASK); | ||
314 | memcpy(tmp, &rtas_log_buf[offset], count); | ||
315 | |||
316 | rtas_log_start += 1; | ||
317 | rtas_log_size -= 1; | ||
318 | spin_unlock_irqrestore(&rtasd_log_lock, s); | ||
319 | |||
320 | error = copy_to_user(buf, tmp, count) ? -EFAULT : count; | ||
321 | out: | ||
322 | kfree(tmp); | ||
323 | return error; | ||
324 | } | ||
325 | |||
326 | static unsigned int rtas_log_poll(struct file *file, poll_table * wait) | ||
327 | { | ||
328 | poll_wait(file, &rtas_log_wait, wait); | ||
329 | if (rtas_log_size) | ||
330 | return POLLIN | POLLRDNORM; | ||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | struct file_operations proc_rtas_log_operations = { | ||
335 | .read = rtas_log_read, | ||
336 | .poll = rtas_log_poll, | ||
337 | .open = rtas_log_open, | ||
338 | .release = rtas_log_release, | ||
339 | }; | ||
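
proc_rtas_log_operations exposes the in-kernel ring to userspace as a pollable, blocking stream: each successful read returns one event, laid out as the sequence number (an int) followed by the raw RTAS log, exactly as assembled in pSeries_log_error() above. A reader sketch — the 4 KB buffer is an assumption that merely has to be at least rtas_error_log_buffer_max, or the read fails with -EINVAL:

/* Illustrative reader for /proc/ppc64/rtas/error_log (created in
 * rtas_init() below). The buffer size is a guess; rtas_log_read()
 * rejects anything smaller than rtas_error_log_buffer_max.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t n;
	int seq;

	pfd.fd = open("/proc/ppc64/rtas/error_log", O_RDONLY);
	pfd.events = POLLIN;
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		n = read(pfd.fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		memcpy(&seq, buf, sizeof(seq));	/* sequence number first */
		printf("RTAS event seq %d, %zd bytes\n", seq, n);
	}
	close(pfd.fd);
	return 0;
}
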
340 | |||
341 | static int enable_surveillance(int timeout) | ||
342 | { | ||
343 | int error; | ||
344 | |||
345 | error = rtas_set_indicator(SURVEILLANCE_TOKEN, 0, timeout); | ||
346 | |||
347 | if (error == 0) | ||
348 | return 0; | ||
349 | |||
350 | if (error == -EINVAL) { | ||
351 | printk(KERN_INFO "rtasd: surveillance not supported\n"); | ||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | printk(KERN_ERR "rtasd: could not update surveillance\n"); | ||
356 | return -1; | ||
357 | } | ||
358 | |||
359 | static int get_eventscan_parms(void) | ||
360 | { | ||
361 | struct device_node *node; | ||
362 | int *ip; | ||
363 | |||
364 | node = of_find_node_by_path("/rtas"); | ||
365 | |||
366 | ip = (int *)get_property(node, "rtas-event-scan-rate", NULL); | ||
367 | if (ip == NULL) { | ||
368 | printk(KERN_ERR "rtasd: no rtas-event-scan-rate\n"); | ||
369 | of_node_put(node); | ||
370 | return -1; | ||
371 | } | ||
372 | rtas_event_scan_rate = *ip; | ||
373 | DEBUG("rtas-event-scan-rate %d\n", rtas_event_scan_rate); | ||
374 | |||
375 | /* Make room for the sequence number */ | ||
376 | rtas_error_log_max = rtas_get_error_log_max(); | ||
377 | rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int); | ||
378 | |||
379 | of_node_put(node); | ||
380 | |||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | static void do_event_scan(int event_scan) | ||
385 | { | ||
386 | int error; | ||
387 | do { | ||
388 | memset(logdata, 0, rtas_error_log_max); | ||
389 | error = rtas_call(event_scan, 4, 1, NULL, | ||
390 | RTAS_EVENT_SCAN_ALL_EVENTS, 0, | ||
391 | __pa(logdata), rtas_error_log_max); | ||
392 | if (error == -1) { | ||
393 | printk(KERN_ERR "event-scan failed\n"); | ||
394 | break; | ||
395 | } | ||
396 | |||
397 | if (error == 0) | ||
398 | pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, 0); | ||
399 | |||
400 | } while(error == 0); | ||
401 | } | ||
402 | |||
403 | static void do_event_scan_all_cpus(long delay) | ||
404 | { | ||
405 | int cpu; | ||
406 | |||
407 | lock_cpu_hotplug(); | ||
408 | cpu = first_cpu(cpu_online_map); | ||
409 | for (;;) { | ||
410 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
411 | do_event_scan(rtas_token("event-scan")); | ||
412 | set_cpus_allowed(current, CPU_MASK_ALL); | ||
413 | |||
414 | /* Drop hotplug lock, and sleep for the specified delay */ | ||
415 | unlock_cpu_hotplug(); | ||
416 | msleep_interruptible(delay); | ||
417 | lock_cpu_hotplug(); | ||
418 | |||
419 | cpu = next_cpu(cpu, cpu_online_map); | ||
420 | if (cpu == NR_CPUS) | ||
421 | break; | ||
422 | } | ||
423 | unlock_cpu_hotplug(); | ||
424 | } | ||
425 | |||
426 | static int rtasd(void *unused) | ||
427 | { | ||
428 | unsigned int err_type; | ||
429 | int event_scan = rtas_token("event-scan"); | ||
430 | int rc; | ||
431 | |||
432 | daemonize("rtasd"); | ||
433 | |||
434 | if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1) | ||
435 | goto error; | ||
436 | |||
437 | rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER); | ||
438 | if (!rtas_log_buf) { | ||
439 | printk(KERN_ERR "rtasd: no memory\n"); | ||
440 | goto error; | ||
441 | } | ||
442 | |||
443 | printk(KERN_INFO "RTAS daemon started\n"); | ||
444 | |||
445 | DEBUG("will sleep for %d milliseconds\n", (30000/rtas_event_scan_rate)); | ||
446 | |||
447 | /* See if we have any error stored in NVRAM */ | ||
448 | memset(logdata, 0, rtas_error_log_max); | ||
449 | |||
450 | rc = nvram_read_error_log(logdata, rtas_error_log_max, &err_type); | ||
451 | |||
452 | /* We can use rtas_log_buf now */ | ||
453 | no_logging = 0; | ||
454 | |||
455 | if (!rc) { | ||
456 | if (err_type != ERR_FLAG_ALREADY_LOGGED) { | ||
457 | pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0); | ||
458 | } | ||
459 | } | ||
460 | |||
461 | /* First pass. */ | ||
462 | do_event_scan_all_cpus(1000); | ||
463 | |||
464 | if (surveillance_timeout != -1) { | ||
465 | DEBUG("enabling surveillance\n"); | ||
466 | enable_surveillance(surveillance_timeout); | ||
467 | DEBUG("surveillance enabled\n"); | ||
468 | } | ||
469 | |||
470 | /* Delay should be at least one second since some | ||
471 | * machines have problems if we call event-scan too | ||
472 | * quickly. */ | ||
473 | for (;;) | ||
474 | do_event_scan_all_cpus(30000/rtas_event_scan_rate); | ||
475 | |||
476 | error: | ||
477 | /* Should delete proc entries */ | ||
478 | return -EINVAL; | ||
479 | } | ||
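The sleep between passes comes straight from the device-tree property read in get_eventscan_parms(); a quick sanity check of the arithmetic used by the loop at the end of rtasd() (values purely illustrative):

	/* delay, in milliseconds, between do_event_scan_all_cpus() passes */
	unsigned long delay_ms = 30000 / rtas_event_scan_rate;
	/* rtas_event_scan_rate ==  1  ->  30000 ms (30 s) between passes
	 * rtas_event_scan_rate == 30  ->   1000 ms between passes
	 * rates above 30 fall below the one-second guideline in the comment above
	 */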
480 | |||
481 | static int __init rtas_init(void) | ||
482 | { | ||
483 | struct proc_dir_entry *entry; | ||
484 | |||
485 | /* No RTAS, only warn if we are on a pSeries box */ | ||
486 | if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) { | ||
487 | if (systemcfg->platform & PLATFORM_PSERIES) | ||
488 | printk(KERN_INFO "rtasd: no event-scan on system\n"); | ||
489 | return 1; | ||
490 | } | ||
491 | |||
492 | entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL); | ||
493 | if (entry) | ||
494 | entry->proc_fops = &proc_rtas_log_operations; | ||
495 | else | ||
496 | printk(KERN_ERR "Failed to create error_log proc entry\n"); | ||
497 | |||
498 | if (kernel_thread(rtasd, NULL, CLONE_FS) < 0) | ||
499 | printk(KERN_ERR "Failed to start RTAS daemon\n"); | ||
500 | |||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | static int __init surveillance_setup(char *str) | ||
505 | { | ||
506 | int i; | ||
507 | |||
508 | if (get_option(&str,&i)) { | ||
509 | if (i >= 0 && i <= 255) | ||
510 | surveillance_timeout = i; | ||
511 | } | ||
512 | |||
513 | return 1; | ||
514 | } | ||
515 | |||
516 | static int __init rtasmsgs_setup(char *str) | ||
517 | { | ||
518 | if (strcmp(str, "on") == 0) | ||
519 | full_rtas_msgs = 1; | ||
520 | else if (strcmp(str, "off") == 0) | ||
521 | full_rtas_msgs = 0; | ||
522 | |||
523 | return 1; | ||
524 | } | ||
525 | __initcall(rtas_init); | ||
526 | __setup("surveillance=", surveillance_setup); | ||
527 | __setup("rtasmsgs=", rtasmsgs_setup); | ||
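For reference, the two __setup() handlers above are driven from the kernel command line; a hedged example (the values are illustrative, not recommendations):

	# appended to the boot arguments
	surveillance=60 rtasmsgs=on

surveillance= accepts 0-255 and becomes the timeout handed to rtas_set_indicator() via enable_surveillance(); rtasmsgs= accepts "on" or "off" and toggles full_rtas_msgs.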
diff --git a/arch/ppc64/kernel/scanlog.c b/arch/ppc64/kernel/scanlog.c index 215bf8900304..2edc947f7c44 100644 --- a/arch/ppc64/kernel/scanlog.c +++ b/arch/ppc64/kernel/scanlog.c | |||
@@ -225,8 +225,7 @@ int __init scanlog_init(void) | |||
225 | void __exit scanlog_cleanup(void) | 225 | void __exit scanlog_cleanup(void) |
226 | { | 226 | { |
227 | if (proc_ppc64_scan_log_dump) { | 227 | if (proc_ppc64_scan_log_dump) { |
228 | if (proc_ppc64_scan_log_dump->data) | 228 | kfree(proc_ppc64_scan_log_dump->data); |
229 | kfree(proc_ppc64_scan_log_dump->data); | ||
230 | remove_proc_entry("scan-log-dump", proc_ppc64_scan_log_dump->parent); | 229 | remove_proc_entry("scan-log-dump", proc_ppc64_scan_log_dump->parent); |
231 | } | 230 | } |
232 | } | 231 | } |
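The scanlog hunk above relies on kfree() treating a NULL pointer as a no-op, so the removed check was redundant; the same simplification applies anywhere the pointer may legitimately be NULL (minimal kernel-context sketch, needs <linux/slab.h>):

	static void example_cleanup(char *buf)
	{
		kfree(buf);	/* kfree(NULL) is defined to do nothing */
	}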
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c deleted file mode 100644 index ec9d0984b6a0..000000000000 --- a/arch/ppc64/kernel/signal.c +++ /dev/null | |||
@@ -1,581 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/ppc64/kernel/signal.c | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Derived from "arch/i386/kernel/signal.c" | ||
8 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
9 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/signal.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/wait.h> | ||
26 | #include <linux/unistd.h> | ||
27 | #include <linux/stddef.h> | ||
28 | #include <linux/elf.h> | ||
29 | #include <linux/ptrace.h> | ||
30 | #include <linux/module.h> | ||
31 | |||
32 | #include <asm/sigcontext.h> | ||
33 | #include <asm/ucontext.h> | ||
34 | #include <asm/uaccess.h> | ||
35 | #include <asm/pgtable.h> | ||
36 | #include <asm/ppcdebug.h> | ||
37 | #include <asm/unistd.h> | ||
38 | #include <asm/cacheflush.h> | ||
39 | #include <asm/vdso.h> | ||
40 | |||
41 | #define DEBUG_SIG 0 | ||
42 | |||
43 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
44 | |||
45 | #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) | ||
46 | #define FP_REGS_SIZE sizeof(elf_fpregset_t) | ||
47 | |||
48 | #define TRAMP_TRACEBACK 3 | ||
49 | #define TRAMP_SIZE 6 | ||
50 | |||
51 | /* | ||
52 | * When we have signals to deliver, we set up on the user stack, | ||
53 | * going down from the original stack pointer: | ||
54 | * 1) a rt_sigframe struct which contains the ucontext | ||
55 | * 2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller | ||
56 | * frame for the signal handler. | ||
57 | */ | ||
58 | |||
59 | struct rt_sigframe { | ||
60 | /* sys_rt_sigreturn requires the ucontext be the first field */ | ||
61 | struct ucontext uc; | ||
62 | unsigned long _unused[2]; | ||
63 | unsigned int tramp[TRAMP_SIZE]; | ||
64 | struct siginfo *pinfo; | ||
65 | void *puc; | ||
66 | struct siginfo info; | ||
67 | /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ | ||
68 | char abigap[288]; | ||
69 | } __attribute__ ((aligned (16))); | ||
70 | |||
71 | |||
72 | /* | ||
73 | * Atomically swap in the new signal mask, and wait for a signal. | ||
74 | */ | ||
75 | long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4, | ||
76 | int p6, int p7, struct pt_regs *regs) | ||
77 | { | ||
78 | sigset_t saveset, newset; | ||
79 | |||
80 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
81 | if (sigsetsize != sizeof(sigset_t)) | ||
82 | return -EINVAL; | ||
83 | |||
84 | if (copy_from_user(&newset, unewset, sizeof(newset))) | ||
85 | return -EFAULT; | ||
86 | sigdelsetmask(&newset, ~_BLOCKABLE); | ||
87 | |||
88 | spin_lock_irq(¤t->sighand->siglock); | ||
89 | saveset = current->blocked; | ||
90 | current->blocked = newset; | ||
91 | recalc_sigpending(); | ||
92 | spin_unlock_irq(¤t->sighand->siglock); | ||
93 | |||
94 | regs->result = -EINTR; | ||
95 | regs->gpr[3] = EINTR; | ||
96 | regs->ccr |= 0x10000000; | ||
97 | while (1) { | ||
98 | current->state = TASK_INTERRUPTIBLE; | ||
99 | schedule(); | ||
100 | if (do_signal(&saveset, regs)) | ||
101 | return 0; | ||
102 | } | ||
103 | } | ||
104 | |||
105 | long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r5, | ||
106 | unsigned long r6, unsigned long r7, unsigned long r8, | ||
107 | struct pt_regs *regs) | ||
108 | { | ||
109 | return do_sigaltstack(uss, uoss, regs->gpr[1]); | ||
110 | } | ||
111 | |||
112 | |||
113 | /* | ||
114 | * Set up the sigcontext for the signal frame. | ||
115 | */ | ||
116 | |||
117 | static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | ||
118 | int signr, sigset_t *set, unsigned long handler) | ||
119 | { | ||
120 | /* When CONFIG_ALTIVEC is set, we _always_ set up v_regs even if the | ||
121 | * process never used altivec yet (MSR_VEC is zero in pt_regs of | ||
122 | * the context). This is very important because we must ensure we | ||
123 | * don't lose the VRSAVE content that may have been set prior to | ||
124 | * the process doing its first vector operation. | ||
125 | * Userland shall check AT_HWCAP to know whether it can rely on the | ||
126 | * v_regs pointer or not. | ||
127 | */ | ||
128 | #ifdef CONFIG_ALTIVEC | ||
129 | elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful); | ||
130 | #endif | ||
131 | long err = 0; | ||
132 | |||
133 | flush_fp_to_thread(current); | ||
134 | |||
135 | /* Make sure signal doesn't get spurious FP exceptions */ | ||
136 | current->thread.fpscr.val = 0; | ||
137 | |||
138 | #ifdef CONFIG_ALTIVEC | ||
139 | err |= __put_user(v_regs, &sc->v_regs); | ||
140 | |||
141 | /* save altivec registers */ | ||
142 | if (current->thread.used_vr) { | ||
143 | flush_altivec_to_thread(current); | ||
144 | /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ | ||
145 | err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128)); | ||
146 | /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_regs | ||
147 | * contains valid data. | ||
148 | */ | ||
149 | regs->msr |= MSR_VEC; | ||
150 | } | ||
151 | /* We always copy to/from vrsave, it's 0 if we don't have or don't | ||
152 | * use altivec. | ||
153 | */ | ||
154 | err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); | ||
155 | #else /* CONFIG_ALTIVEC */ | ||
156 | err |= __put_user(0, &sc->v_regs); | ||
157 | #endif /* CONFIG_ALTIVEC */ | ||
158 | err |= __put_user(&sc->gp_regs, &sc->regs); | ||
159 | err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); | ||
160 | err |= __copy_to_user(&sc->fp_regs, ¤t->thread.fpr, FP_REGS_SIZE); | ||
161 | err |= __put_user(signr, &sc->signal); | ||
162 | err |= __put_user(handler, &sc->handler); | ||
163 | if (set != NULL) | ||
164 | err |= __put_user(set->sig[0], &sc->oldmask); | ||
165 | |||
166 | return err; | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Restore the sigcontext from the signal frame. | ||
171 | */ | ||
172 | |||
173 | static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, | ||
174 | struct sigcontext __user *sc) | ||
175 | { | ||
176 | #ifdef CONFIG_ALTIVEC | ||
177 | elf_vrreg_t __user *v_regs; | ||
178 | #endif | ||
179 | unsigned long err = 0; | ||
180 | unsigned long save_r13 = 0; | ||
181 | elf_greg_t *gregs = (elf_greg_t *)regs; | ||
182 | #ifdef CONFIG_ALTIVEC | ||
183 | unsigned long msr; | ||
184 | #endif | ||
185 | int i; | ||
186 | |||
187 | /* If this is not a signal return, we preserve the TLS in r13 */ | ||
188 | if (!sig) | ||
189 | save_r13 = regs->gpr[13]; | ||
190 | |||
191 | /* copy everything before MSR */ | ||
192 | err |= __copy_from_user(regs, &sc->gp_regs, | ||
193 | PT_MSR*sizeof(unsigned long)); | ||
194 | |||
195 | /* skip MSR and SOFTE */ | ||
196 | for (i = PT_MSR+1; i <= PT_RESULT; i++) { | ||
197 | if (i == PT_SOFTE) | ||
198 | continue; | ||
199 | err |= __get_user(gregs[i], &sc->gp_regs[i]); | ||
200 | } | ||
201 | |||
202 | if (!sig) | ||
203 | regs->gpr[13] = save_r13; | ||
204 | err |= __copy_from_user(¤t->thread.fpr, &sc->fp_regs, FP_REGS_SIZE); | ||
205 | if (set != NULL) | ||
206 | err |= __get_user(set->sig[0], &sc->oldmask); | ||
207 | |||
208 | #ifdef CONFIG_ALTIVEC | ||
209 | err |= __get_user(v_regs, &sc->v_regs); | ||
210 | err |= __get_user(msr, &sc->gp_regs[PT_MSR]); | ||
211 | if (err) | ||
212 | return err; | ||
213 | /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ | ||
214 | if (v_regs != 0 && (msr & MSR_VEC) != 0) | ||
215 | err |= __copy_from_user(current->thread.vr, v_regs, | ||
216 | 33 * sizeof(vector128)); | ||
217 | else if (current->thread.used_vr) | ||
218 | memset(current->thread.vr, 0, 33 * sizeof(vector128)); | ||
219 | /* Always get VRSAVE back */ | ||
220 | if (v_regs != 0) | ||
221 | err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); | ||
222 | else | ||
223 | current->thread.vrsave = 0; | ||
224 | #endif /* CONFIG_ALTIVEC */ | ||
225 | |||
226 | #ifndef CONFIG_SMP | ||
227 | preempt_disable(); | ||
228 | if (last_task_used_math == current) | ||
229 | last_task_used_math = NULL; | ||
230 | if (last_task_used_altivec == current) | ||
231 | last_task_used_altivec = NULL; | ||
232 | preempt_enable(); | ||
233 | #endif | ||
234 | /* Force reload of FP/VEC */ | ||
235 | regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC); | ||
236 | |||
237 | return err; | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * Allocate space for the signal frame | ||
242 | */ | ||
243 | static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | ||
244 | size_t frame_size) | ||
245 | { | ||
246 | unsigned long newsp; | ||
247 | |||
248 | /* Default to using normal stack */ | ||
249 | newsp = regs->gpr[1]; | ||
250 | |||
251 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
252 | if (! on_sig_stack(regs->gpr[1])) | ||
253 | newsp = (current->sas_ss_sp + current->sas_ss_size); | ||
254 | } | ||
255 | |||
256 | return (void __user *)((newsp - frame_size) & -16ul); | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Setup the trampoline code on the stack | ||
261 | */ | ||
262 | static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) | ||
263 | { | ||
264 | int i; | ||
265 | long err = 0; | ||
266 | |||
267 | /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */ | ||
268 | err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]); | ||
269 | /* li r0, __NR_[rt_]sigreturn| */ | ||
270 | err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]); | ||
271 | /* sc */ | ||
272 | err |= __put_user(0x44000002UL, &tramp[2]); | ||
273 | |||
274 | /* Minimal traceback info */ | ||
275 | for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++) | ||
276 | err |= __put_user(0, &tramp[i]); | ||
277 | |||
278 | if (!err) | ||
279 | flush_icache_range((unsigned long) &tramp[0], | ||
280 | (unsigned long) &tramp[TRAMP_SIZE]); | ||
281 | |||
282 | return err; | ||
283 | } | ||
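As a worked decode of the three instruction words stored above (assuming __SIGNAL_FRAMESIZE is 128, its 64-bit ABI value, and __NR_rt_sigreturn is 172 as on ppc64; check the headers for other configurations):

	tramp[0] = 0x38210000 | 128	/* = 0x38210080: addi r1,r1,128 - pop the dummy frame */
	tramp[1] = 0x38000000 | 172	/* = 0x380000ac: li r0,172      - select rt_sigreturn */
	tramp[2] = 0x44000002		/*               sc             - trap into the kernel */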
284 | |||
285 | /* | ||
286 | * Restore the user process's signal mask (also used by signal32.c) | ||
287 | */ | ||
288 | void restore_sigmask(sigset_t *set) | ||
289 | { | ||
290 | sigdelsetmask(set, ~_BLOCKABLE); | ||
291 | spin_lock_irq(¤t->sighand->siglock); | ||
292 | current->blocked = *set; | ||
293 | recalc_sigpending(); | ||
294 | spin_unlock_irq(¤t->sighand->siglock); | ||
295 | } | ||
296 | |||
297 | |||
298 | /* | ||
299 | * Handle {get,set,swap}_context operations | ||
300 | */ | ||
301 | int sys_swapcontext(struct ucontext __user *old_ctx, | ||
302 | struct ucontext __user *new_ctx, | ||
303 | long ctx_size, long r6, long r7, long r8, struct pt_regs *regs) | ||
304 | { | ||
305 | unsigned char tmp; | ||
306 | sigset_t set; | ||
307 | |||
308 | /* Context size is for future use. Right now, we only make sure | ||
309 | * we are passed something we understand | ||
310 | */ | ||
311 | if (ctx_size < sizeof(struct ucontext)) | ||
312 | return -EINVAL; | ||
313 | |||
314 | if (old_ctx != NULL) { | ||
315 | if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx)) | ||
316 | || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0) | ||
317 | || __copy_to_user(&old_ctx->uc_sigmask, | ||
318 | ¤t->blocked, sizeof(sigset_t))) | ||
319 | return -EFAULT; | ||
320 | } | ||
321 | if (new_ctx == NULL) | ||
322 | return 0; | ||
323 | if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx)) | ||
324 | || __get_user(tmp, (u8 __user *) new_ctx) | ||
325 | || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1)) | ||
326 | return -EFAULT; | ||
327 | |||
328 | /* | ||
329 | * If we get a fault copying the context into the kernel's | ||
330 | * image of the user's registers, we can't just return -EFAULT | ||
331 | * because the user's registers will be corrupted. For instance | ||
332 | * the NIP value may have been updated but not some of the | ||
333 | * other registers. Given that we have done the access_ok | ||
334 | * and successfully read the first and last bytes of the region | ||
335 | * above, this should only happen in an out-of-memory situation | ||
336 | * or if another thread unmaps the region containing the context. | ||
337 | * We kill the task with a SIGSEGV in this situation. | ||
338 | */ | ||
339 | |||
340 | if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set))) | ||
341 | do_exit(SIGSEGV); | ||
342 | restore_sigmask(&set); | ||
343 | if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext)) | ||
344 | do_exit(SIGSEGV); | ||
345 | |||
346 | /* This returns like rt_sigreturn */ | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | |||
351 | /* | ||
352 | * Do a signal return; undo the signal stack. | ||
353 | */ | ||
354 | |||
355 | int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, | ||
356 | unsigned long r6, unsigned long r7, unsigned long r8, | ||
357 | struct pt_regs *regs) | ||
358 | { | ||
359 | struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1]; | ||
360 | sigset_t set; | ||
361 | |||
362 | /* Always make any pending restarted system calls return -EINTR */ | ||
363 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
364 | |||
365 | if (!access_ok(VERIFY_READ, uc, sizeof(*uc))) | ||
366 | goto badframe; | ||
367 | |||
368 | if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) | ||
369 | goto badframe; | ||
370 | restore_sigmask(&set); | ||
371 | if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext)) | ||
372 | goto badframe; | ||
373 | |||
374 | /* do_sigaltstack expects a __user pointer and won't modify | ||
375 | * what's in there anyway | ||
376 | */ | ||
377 | do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]); | ||
378 | |||
379 | return regs->result; | ||
380 | |||
381 | badframe: | ||
382 | #if DEBUG_SIG | ||
383 | printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", | ||
384 | regs, uc, &uc->uc_mcontext); | ||
385 | #endif | ||
386 | force_sig(SIGSEGV, current); | ||
387 | return 0; | ||
388 | } | ||
389 | |||
390 | static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, | ||
391 | sigset_t *set, struct pt_regs *regs) | ||
392 | { | ||
393 | /* Handler is *really* a pointer to the function descriptor for | ||
394 | * the signal routine. The first entry in the function | ||
395 | * descriptor is the entry address of signal and the second | ||
396 | * entry is the TOC value we need to use. | ||
397 | */ | ||
398 | func_descr_t __user *funct_desc_ptr; | ||
399 | struct rt_sigframe __user *frame; | ||
400 | unsigned long newsp = 0; | ||
401 | long err = 0; | ||
402 | |||
403 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
404 | |||
405 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
406 | goto badframe; | ||
407 | |||
408 | err |= __put_user(&frame->info, &frame->pinfo); | ||
409 | err |= __put_user(&frame->uc, &frame->puc); | ||
410 | err |= copy_siginfo_to_user(&frame->info, info); | ||
411 | if (err) | ||
412 | goto badframe; | ||
413 | |||
414 | /* Create the ucontext. */ | ||
415 | err |= __put_user(0, &frame->uc.uc_flags); | ||
416 | err |= __put_user(0, &frame->uc.uc_link); | ||
417 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | ||
418 | err |= __put_user(sas_ss_flags(regs->gpr[1]), | ||
419 | &frame->uc.uc_stack.ss_flags); | ||
420 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
421 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL, | ||
422 | (unsigned long)ka->sa.sa_handler); | ||
423 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
424 | if (err) | ||
425 | goto badframe; | ||
426 | |||
427 | /* Set up to return from userspace. */ | ||
428 | if (vdso64_rt_sigtramp && current->thread.vdso_base) { | ||
429 | regs->link = current->thread.vdso_base + vdso64_rt_sigtramp; | ||
430 | } else { | ||
431 | err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); | ||
432 | if (err) | ||
433 | goto badframe; | ||
434 | regs->link = (unsigned long) &frame->tramp[0]; | ||
435 | } | ||
436 | funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler; | ||
437 | |||
438 | /* Allocate a dummy caller frame for the signal handler. */ | ||
439 | newsp = (unsigned long)frame - __SIGNAL_FRAMESIZE; | ||
440 | err |= put_user(regs->gpr[1], (unsigned long __user *)newsp); | ||
441 | |||
442 | /* Set up "regs" so we "return" to the signal handler. */ | ||
443 | err |= get_user(regs->nip, &funct_desc_ptr->entry); | ||
444 | regs->gpr[1] = newsp; | ||
445 | err |= get_user(regs->gpr[2], &funct_desc_ptr->toc); | ||
446 | regs->gpr[3] = signr; | ||
447 | regs->result = 0; | ||
448 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
449 | err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo); | ||
450 | err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc); | ||
451 | regs->gpr[6] = (unsigned long) frame; | ||
452 | } else { | ||
453 | regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext; | ||
454 | } | ||
455 | if (err) | ||
456 | goto badframe; | ||
457 | |||
458 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
459 | ptrace_notify(SIGTRAP); | ||
460 | |||
461 | return 1; | ||
462 | |||
463 | badframe: | ||
464 | #if DEBUG_SIG | ||
465 | printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", | ||
466 | regs, frame, newsp); | ||
467 | #endif | ||
468 | force_sigsegv(signr, current); | ||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | |||
473 | /* | ||
474 | * OK, we're invoking a handler | ||
475 | */ | ||
476 | static int handle_signal(unsigned long sig, struct k_sigaction *ka, | ||
477 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) | ||
478 | { | ||
479 | int ret; | ||
480 | |||
481 | /* Set up Signal Frame */ | ||
482 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | ||
483 | |||
484 | if (ret) { | ||
485 | spin_lock_irq(¤t->sighand->siglock); | ||
486 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | ||
487 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
488 | sigaddset(¤t->blocked,sig); | ||
489 | recalc_sigpending(); | ||
490 | spin_unlock_irq(¤t->sighand->siglock); | ||
491 | } | ||
492 | |||
493 | return ret; | ||
494 | } | ||
495 | |||
496 | static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) | ||
497 | { | ||
498 | switch ((int)regs->result) { | ||
499 | case -ERESTART_RESTARTBLOCK: | ||
500 | case -ERESTARTNOHAND: | ||
501 | /* ERESTARTNOHAND means that the syscall should only be | ||
502 | * restarted if there was no handler for the signal, and since | ||
503 | * we only get here if there is a handler, we don't restart. | ||
504 | */ | ||
505 | regs->result = -EINTR; | ||
506 | break; | ||
507 | case -ERESTARTSYS: | ||
508 | /* ERESTARTSYS means to restart the syscall if there is no | ||
509 | * handler or the handler was registered with SA_RESTART | ||
510 | */ | ||
511 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
512 | regs->result = -EINTR; | ||
513 | break; | ||
514 | } | ||
515 | /* fallthrough */ | ||
516 | case -ERESTARTNOINTR: | ||
517 | /* ERESTARTNOINTR means that the syscall should be | ||
518 | * called again after the signal handler returns. | ||
519 | */ | ||
520 | regs->gpr[3] = regs->orig_gpr3; | ||
521 | regs->nip -= 4; | ||
522 | regs->result = 0; | ||
523 | break; | ||
524 | } | ||
525 | } | ||
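The -ERESTARTSYS case is the one userspace can observe through SA_RESTART; a minimal, ordinary POSIX sketch (not part of this file) showing the interrupted path:

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	static void on_alarm(int sig) { (void)sig; }	/* a handler exists, so the syscall is interrupted */

	int main(void)
	{
		struct sigaction sa = { 0 };
		char c;

		sa.sa_handler = on_alarm;
		sa.sa_flags = 0;	/* with SA_RESTART set here, the kernel re-issues the read() instead */
		sigaction(SIGALRM, &sa, NULL);
		alarm(1);

		if (read(STDIN_FILENO, &c, 1) < 0 && errno == EINTR)
			printf("read() interrupted, not restarted\n");
		return 0;
	}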
526 | |||
527 | /* | ||
528 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
529 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
530 | * mistake. | ||
531 | */ | ||
532 | int do_signal(sigset_t *oldset, struct pt_regs *regs) | ||
533 | { | ||
534 | siginfo_t info; | ||
535 | int signr; | ||
536 | struct k_sigaction ka; | ||
537 | |||
538 | /* | ||
539 | * If the current thread is 32 bit - invoke the | ||
540 | * 32 bit signal handling code | ||
541 | */ | ||
542 | if (test_thread_flag(TIF_32BIT)) | ||
543 | return do_signal32(oldset, regs); | ||
544 | |||
545 | if (!oldset) | ||
546 | oldset = ¤t->blocked; | ||
547 | |||
548 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
549 | if (signr > 0) { | ||
550 | /* Whee! Actually deliver the signal. */ | ||
551 | if (TRAP(regs) == 0x0C00) | ||
552 | syscall_restart(regs, &ka); | ||
553 | |||
554 | /* | ||
555 | * Reenable the DABR before delivering the signal to | ||
556 | * user space. The DABR will have been cleared if it | ||
557 | * triggered inside the kernel. | ||
558 | */ | ||
559 | if (current->thread.dabr) | ||
560 | set_dabr(current->thread.dabr); | ||
561 | |||
562 | return handle_signal(signr, &ka, &info, oldset, regs); | ||
563 | } | ||
564 | |||
565 | if (TRAP(regs) == 0x0C00) { /* System Call! */ | ||
566 | if ((int)regs->result == -ERESTARTNOHAND || | ||
567 | (int)regs->result == -ERESTARTSYS || | ||
568 | (int)regs->result == -ERESTARTNOINTR) { | ||
569 | regs->gpr[3] = regs->orig_gpr3; | ||
570 | regs->nip -= 4; /* Back up & retry system call */ | ||
571 | regs->result = 0; | ||
572 | } else if ((int)regs->result == -ERESTART_RESTARTBLOCK) { | ||
573 | regs->gpr[0] = __NR_restart_syscall; | ||
574 | regs->nip -= 4; | ||
575 | regs->result = 0; | ||
576 | } | ||
577 | } | ||
578 | |||
579 | return 0; | ||
580 | } | ||
581 | EXPORT_SYMBOL(do_signal); | ||
diff --git a/arch/ppc64/kernel/smp-tbsync.c b/arch/ppc64/kernel/smp-tbsync.c deleted file mode 100644 index 7d8ec9996b3e..000000000000 --- a/arch/ppc64/kernel/smp-tbsync.c +++ /dev/null | |||
@@ -1,179 +0,0 @@ | |||
1 | /* | ||
2 | * Smp timebase synchronization for ppc. | ||
3 | * | ||
4 | * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se) | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/config.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/smp.h> | ||
12 | #include <linux/unistd.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <asm/atomic.h> | ||
15 | #include <asm/smp.h> | ||
16 | #include <asm/time.h> | ||
17 | |||
18 | #define NUM_ITER 300 | ||
19 | |||
20 | enum { | ||
21 | kExit=0, kSetAndTest, kTest | ||
22 | }; | ||
23 | |||
24 | static struct { | ||
25 | volatile long tb; | ||
26 | volatile long mark; | ||
27 | volatile int cmd; | ||
28 | volatile int handshake; | ||
29 | int filler[3]; | ||
30 | |||
31 | volatile int ack; | ||
32 | int filler2[7]; | ||
33 | |||
34 | volatile int race_result; | ||
35 | } *tbsync; | ||
36 | |||
37 | static volatile int running; | ||
38 | |||
39 | static void __devinit | ||
40 | enter_contest( long mark, long add ) | ||
41 | { | ||
42 | while( (long)(mftb() - mark) < 0 ) | ||
43 | tbsync->race_result = add; | ||
44 | } | ||
45 | |||
46 | void __devinit | ||
47 | smp_generic_take_timebase( void ) | ||
48 | { | ||
49 | int cmd; | ||
50 | long tb; | ||
51 | |||
52 | local_irq_disable(); | ||
53 | while( !running ) | ||
54 | ; | ||
55 | rmb(); | ||
56 | |||
57 | for( ;; ) { | ||
58 | tbsync->ack = 1; | ||
59 | while( !tbsync->handshake ) | ||
60 | ; | ||
61 | rmb(); | ||
62 | |||
63 | cmd = tbsync->cmd; | ||
64 | tb = tbsync->tb; | ||
65 | tbsync->ack = 0; | ||
66 | if( cmd == kExit ) | ||
67 | return; | ||
68 | |||
69 | if( cmd == kSetAndTest ) { | ||
70 | while( tbsync->handshake ) | ||
71 | ; | ||
72 | asm volatile ("mttbl %0" :: "r" (tb & 0xfffffffful) ); | ||
73 | asm volatile ("mttbu %0" :: "r" (tb >> 32) ); | ||
74 | } else { | ||
75 | while( tbsync->handshake ) | ||
76 | ; | ||
77 | } | ||
78 | enter_contest( tbsync->mark, -1 ); | ||
79 | } | ||
80 | local_irq_enable(); | ||
81 | } | ||
82 | |||
83 | static int __devinit | ||
84 | start_contest( int cmd, long offset, long num ) | ||
85 | { | ||
86 | int i, score=0; | ||
87 | long tb, mark; | ||
88 | |||
89 | tbsync->cmd = cmd; | ||
90 | |||
91 | local_irq_disable(); | ||
92 | for( i=-3; i<num; ) { | ||
93 | tb = (long)mftb() + 400; | ||
94 | tbsync->tb = tb + offset; | ||
95 | tbsync->mark = mark = tb + 400; | ||
96 | |||
97 | wmb(); | ||
98 | |||
99 | tbsync->handshake = 1; | ||
100 | while( tbsync->ack ) | ||
101 | ; | ||
102 | |||
103 | while( (long)(mftb() - tb) <= 0 ) | ||
104 | ; | ||
105 | tbsync->handshake = 0; | ||
106 | enter_contest( mark, 1 ); | ||
107 | |||
108 | while( !tbsync->ack ) | ||
109 | ; | ||
110 | |||
111 | if ((tbsync->tb ^ (long)mftb()) & 0x8000000000000000ul) | ||
112 | continue; | ||
113 | if( i++ > 0 ) | ||
114 | score += tbsync->race_result; | ||
115 | } | ||
116 | local_irq_enable(); | ||
117 | return score; | ||
118 | } | ||
119 | |||
120 | void __devinit | ||
121 | smp_generic_give_timebase( void ) | ||
122 | { | ||
123 | int i, score, score2, old, min=0, max=5000, offset=1000; | ||
124 | |||
125 | printk("Synchronizing timebase\n"); | ||
126 | |||
127 | /* if this fails then this kernel won't work anyway... */ | ||
128 | tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL ); | ||
129 | memset( tbsync, 0, sizeof(*tbsync) ); | ||
130 | mb(); | ||
131 | running = 1; | ||
132 | |||
133 | while( !tbsync->ack ) | ||
134 | ; | ||
135 | |||
136 | printk("Got ack\n"); | ||
137 | |||
138 | /* binary search */ | ||
139 | for( old=-1 ; old != offset ; offset=(min+max)/2 ) { | ||
140 | score = start_contest( kSetAndTest, offset, NUM_ITER ); | ||
141 | |||
142 | printk("score %d, offset %d\n", score, offset ); | ||
143 | |||
144 | if( score > 0 ) | ||
145 | max = offset; | ||
146 | else | ||
147 | min = offset; | ||
148 | old = offset; | ||
149 | } | ||
150 | score = start_contest( kSetAndTest, min, NUM_ITER ); | ||
151 | score2 = start_contest( kSetAndTest, max, NUM_ITER ); | ||
152 | |||
153 | printk( "Min %d (score %d), Max %d (score %d)\n", min, score, max, score2 ); | ||
154 | score = abs( score ); | ||
155 | score2 = abs( score2 ); | ||
156 | offset = (score < score2) ? min : max; | ||
157 | |||
158 | /* guard against inaccurate mttb */ | ||
159 | for( i=0; i<10; i++ ) { | ||
160 | start_contest( kSetAndTest, offset, NUM_ITER/10 ); | ||
161 | |||
162 | if( (score2=start_contest(kTest, offset, NUM_ITER)) < 0 ) | ||
163 | score2 = -score2; | ||
164 | if( score2 <= score || score2 < 20 ) | ||
165 | break; | ||
166 | } | ||
167 | printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); | ||
168 | |||
169 | /* exiting */ | ||
170 | tbsync->cmd = kExit; | ||
171 | wmb(); | ||
172 | tbsync->handshake = 1; | ||
173 | while( tbsync->ack ) | ||
174 | ; | ||
175 | tbsync->handshake = 0; | ||
176 | kfree( tbsync ); | ||
177 | tbsync = NULL; | ||
178 | running = 0; | ||
179 | } | ||
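Stripped of the timebase plumbing, the offset search above is a plain bisection keyed on the sign of the contest score; a sketch with the same min/max/old bookkeeping (measure_score() is a hypothetical stand-in for start_contest()):

	int old = -1, min = 0, max = 5000, offset = 1000;

	while (old != offset) {
		int score = measure_score(offset);	/* only the sign matters */
		if (score > 0)
			max = offset;	/* candidate too large: keep the lower half */
		else
			min = offset;	/* candidate too small: keep the upper half */
		old = offset;
		offset = (min + max) / 2;
	}
	/* ends once min and max pin the offset down to a single midpoint */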
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c deleted file mode 100644 index 017c12919832..000000000000 --- a/arch/ppc64/kernel/smp.c +++ /dev/null | |||
@@ -1,568 +0,0 @@ | |||
1 | /* | ||
2 | * SMP support for ppc. | ||
3 | * | ||
4 | * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great | ||
5 | * deal of code from the sparc and intel versions. | ||
6 | * | ||
7 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
8 | * | ||
9 | * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and | ||
10 | * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #undef DEBUG | ||
19 | |||
20 | #include <linux/config.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/smp.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/cache.h> | ||
30 | #include <linux/err.h> | ||
31 | #include <linux/sysdev.h> | ||
32 | #include <linux/cpu.h> | ||
33 | #include <linux/notifier.h> | ||
34 | |||
35 | #include <asm/ptrace.h> | ||
36 | #include <asm/atomic.h> | ||
37 | #include <asm/irq.h> | ||
38 | #include <asm/page.h> | ||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/prom.h> | ||
41 | #include <asm/smp.h> | ||
42 | #include <asm/paca.h> | ||
43 | #include <asm/time.h> | ||
44 | #include <asm/machdep.h> | ||
45 | #include <asm/cputable.h> | ||
46 | #include <asm/system.h> | ||
47 | #include <asm/abs_addr.h> | ||
48 | #include <asm/mpic.h> | ||
49 | |||
50 | #ifdef DEBUG | ||
51 | #define DBG(fmt...) udbg_printf(fmt) | ||
52 | #else | ||
53 | #define DBG(fmt...) | ||
54 | #endif | ||
55 | |||
56 | cpumask_t cpu_possible_map = CPU_MASK_NONE; | ||
57 | cpumask_t cpu_online_map = CPU_MASK_NONE; | ||
58 | cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; | ||
59 | |||
60 | EXPORT_SYMBOL(cpu_online_map); | ||
61 | EXPORT_SYMBOL(cpu_possible_map); | ||
62 | |||
63 | struct smp_ops_t *smp_ops; | ||
64 | |||
65 | static volatile unsigned int cpu_callin_map[NR_CPUS]; | ||
66 | |||
67 | void smp_call_function_interrupt(void); | ||
68 | |||
69 | int smt_enabled_at_boot = 1; | ||
70 | |||
71 | #ifdef CONFIG_MPIC | ||
72 | int __init smp_mpic_probe(void) | ||
73 | { | ||
74 | int nr_cpus; | ||
75 | |||
76 | DBG("smp_mpic_probe()...\n"); | ||
77 | |||
78 | nr_cpus = cpus_weight(cpu_possible_map); | ||
79 | |||
80 | DBG("nr_cpus: %d\n", nr_cpus); | ||
81 | |||
82 | if (nr_cpus > 1) | ||
83 | mpic_request_ipis(); | ||
84 | |||
85 | return nr_cpus; | ||
86 | } | ||
87 | |||
88 | void __devinit smp_mpic_setup_cpu(int cpu) | ||
89 | { | ||
90 | mpic_setup_this_cpu(); | ||
91 | } | ||
92 | |||
93 | void __devinit smp_generic_kick_cpu(int nr) | ||
94 | { | ||
95 | BUG_ON(nr < 0 || nr >= NR_CPUS); | ||
96 | |||
97 | /* | ||
98 | * The processor is currently spinning, waiting for the | ||
99 | * cpu_start field to become non-zero. After we set cpu_start, | ||
100 | * the processor will continue on to secondary_start. | ||
101 | */ | ||
102 | paca[nr].cpu_start = 1; | ||
103 | smp_mb(); | ||
104 | } | ||
105 | |||
106 | #endif /* CONFIG_MPIC */ | ||
107 | |||
108 | void smp_message_recv(int msg, struct pt_regs *regs) | ||
109 | { | ||
110 | switch(msg) { | ||
111 | case PPC_MSG_CALL_FUNCTION: | ||
112 | smp_call_function_interrupt(); | ||
113 | break; | ||
114 | case PPC_MSG_RESCHEDULE: | ||
115 | /* XXX Do we have to do this? */ | ||
116 | set_need_resched(); | ||
117 | break; | ||
118 | #if 0 | ||
119 | case PPC_MSG_MIGRATE_TASK: | ||
120 | /* spare */ | ||
121 | break; | ||
122 | #endif | ||
123 | #ifdef CONFIG_DEBUGGER | ||
124 | case PPC_MSG_DEBUGGER_BREAK: | ||
125 | debugger_ipi(regs); | ||
126 | break; | ||
127 | #endif | ||
128 | default: | ||
129 | printk("SMP %d: smp_message_recv(): unknown msg %d\n", | ||
130 | smp_processor_id(), msg); | ||
131 | break; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | void smp_send_reschedule(int cpu) | ||
136 | { | ||
137 | smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); | ||
138 | } | ||
139 | |||
140 | #ifdef CONFIG_DEBUGGER | ||
141 | void smp_send_debugger_break(int cpu) | ||
142 | { | ||
143 | smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); | ||
144 | } | ||
145 | #endif | ||
146 | |||
147 | static void stop_this_cpu(void *dummy) | ||
148 | { | ||
149 | local_irq_disable(); | ||
150 | while (1) | ||
151 | ; | ||
152 | } | ||
153 | |||
154 | void smp_send_stop(void) | ||
155 | { | ||
156 | smp_call_function(stop_this_cpu, NULL, 1, 0); | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * Structure and data for smp_call_function(). This is designed to minimise | ||
161 | * static memory requirements. It also looks cleaner. | ||
162 | * Stolen from the i386 version. | ||
163 | */ | ||
164 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); | ||
165 | |||
166 | static struct call_data_struct { | ||
167 | void (*func) (void *info); | ||
168 | void *info; | ||
169 | atomic_t started; | ||
170 | atomic_t finished; | ||
171 | int wait; | ||
172 | } *call_data; | ||
173 | |||
174 | /* delay of at least 8 seconds on 1GHz cpu */ | ||
175 | #define SMP_CALL_TIMEOUT (1UL << (30 + 3)) | ||
176 | |||
177 | /* | ||
178 | * This function sends a 'generic call function' IPI to all other CPUs | ||
179 | * in the system. | ||
180 | * | ||
181 | * [SUMMARY] Run a function on all other CPUs. | ||
182 | * <func> The function to run. This must be fast and non-blocking. | ||
183 | * <info> An arbitrary pointer to pass to the function. | ||
184 | * <nonatomic> currently unused. | ||
185 | * <wait> If true, wait (atomically) until function has completed on other CPUs. | ||
186 | * [RETURNS] 0 on success, else a negative status code. Does not return until | ||
187 | * remote CPUs are nearly ready to execute <<func>>, are executing it, or have executed it. | ||
188 | * | ||
189 | * You must not call this function with disabled interrupts or from a | ||
190 | * hardware interrupt handler or from a bottom half handler. | ||
191 | */ | ||
192 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | ||
193 | int wait) | ||
194 | { | ||
195 | struct call_data_struct data; | ||
196 | int ret = -1, cpus; | ||
197 | unsigned long timeout; | ||
198 | |||
199 | /* Can deadlock when called with interrupts disabled */ | ||
200 | WARN_ON(irqs_disabled()); | ||
201 | |||
202 | data.func = func; | ||
203 | data.info = info; | ||
204 | atomic_set(&data.started, 0); | ||
205 | data.wait = wait; | ||
206 | if (wait) | ||
207 | atomic_set(&data.finished, 0); | ||
208 | |||
209 | spin_lock(&call_lock); | ||
210 | /* Must grab online cpu count with preempt disabled, otherwise | ||
211 | * it can change. */ | ||
212 | cpus = num_online_cpus() - 1; | ||
213 | if (!cpus) { | ||
214 | ret = 0; | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | call_data = &data; | ||
219 | smp_wmb(); | ||
220 | /* Send a message to all other CPUs and wait for them to respond */ | ||
221 | smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION); | ||
222 | |||
223 | /* Wait for response */ | ||
224 | timeout = SMP_CALL_TIMEOUT; | ||
225 | while (atomic_read(&data.started) != cpus) { | ||
226 | HMT_low(); | ||
227 | if (--timeout == 0) { | ||
228 | printk("smp_call_function on cpu %d: other cpus not " | ||
229 | "responding (%d)\n", smp_processor_id(), | ||
230 | atomic_read(&data.started)); | ||
231 | debugger(NULL); | ||
232 | goto out; | ||
233 | } | ||
234 | } | ||
235 | |||
236 | if (wait) { | ||
237 | timeout = SMP_CALL_TIMEOUT; | ||
238 | while (atomic_read(&data.finished) != cpus) { | ||
239 | HMT_low(); | ||
240 | if (--timeout == 0) { | ||
241 | printk("smp_call_function on cpu %d: other " | ||
242 | "cpus not finishing (%d/%d)\n", | ||
243 | smp_processor_id(), | ||
244 | atomic_read(&data.finished), | ||
245 | atomic_read(&data.started)); | ||
246 | debugger(NULL); | ||
247 | goto out; | ||
248 | } | ||
249 | } | ||
250 | } | ||
251 | |||
252 | ret = 0; | ||
253 | |||
254 | out: | ||
255 | call_data = NULL; | ||
256 | HMT_medium(); | ||
257 | spin_unlock(&call_lock); | ||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | EXPORT_SYMBOL(smp_call_function); | ||
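A hedged usage sketch of the interface documented above (the callback and counter are illustrative; note the documented constraints: the function must be fast and non-blocking, and the caller must not have interrupts disabled):

	static atomic_t acks = ATOMIC_INIT(0);

	static void bump(void *info)
	{
		atomic_inc((atomic_t *)info);	/* runs on every other online cpu */
	}

	static void poke_other_cpus(void)
	{
		/* wait == 1: do not return until every remote cpu has run bump() */
		if (smp_call_function(bump, &acks, 0, 1) == 0)
			printk("remote cpus reached: %d\n", atomic_read(&acks));
	}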
262 | |||
263 | void smp_call_function_interrupt(void) | ||
264 | { | ||
265 | void (*func) (void *info); | ||
266 | void *info; | ||
267 | int wait; | ||
268 | |||
269 | /* call_data will be NULL if the sender timed out while | ||
270 | * waiting on us to receive the call. | ||
271 | */ | ||
272 | if (!call_data) | ||
273 | return; | ||
274 | |||
275 | func = call_data->func; | ||
276 | info = call_data->info; | ||
277 | wait = call_data->wait; | ||
278 | |||
279 | if (!wait) | ||
280 | smp_mb__before_atomic_inc(); | ||
281 | |||
282 | /* | ||
283 | * Notify initiating CPU that I've grabbed the data and am | ||
284 | * about to execute the function | ||
285 | */ | ||
286 | atomic_inc(&call_data->started); | ||
287 | /* | ||
288 | * At this point the info structure may be out of scope unless wait==1 | ||
289 | */ | ||
290 | (*func)(info); | ||
291 | if (wait) { | ||
292 | smp_mb__before_atomic_inc(); | ||
293 | atomic_inc(&call_data->finished); | ||
294 | } | ||
295 | } | ||
296 | |||
297 | extern struct gettimeofday_struct do_gtod; | ||
298 | |||
299 | struct thread_info *current_set[NR_CPUS]; | ||
300 | |||
301 | DECLARE_PER_CPU(unsigned int, pvr); | ||
302 | |||
303 | static void __devinit smp_store_cpu_info(int id) | ||
304 | { | ||
305 | per_cpu(pvr, id) = mfspr(SPRN_PVR); | ||
306 | } | ||
307 | |||
308 | static void __init smp_create_idle(unsigned int cpu) | ||
309 | { | ||
310 | struct task_struct *p; | ||
311 | |||
312 | /* create a process for the processor */ | ||
313 | p = fork_idle(cpu); | ||
314 | if (IS_ERR(p)) | ||
315 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | ||
316 | paca[cpu].__current = p; | ||
317 | current_set[cpu] = p->thread_info; | ||
318 | } | ||
319 | |||
320 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
321 | { | ||
322 | unsigned int cpu; | ||
323 | |||
324 | DBG("smp_prepare_cpus\n"); | ||
325 | |||
326 | /* | ||
327 | * setup_cpu may need to be called on the boot cpu. We haven't | ||
328 | * spun any cpus up but let's be paranoid. | ||
329 | */ | ||
330 | BUG_ON(boot_cpuid != smp_processor_id()); | ||
331 | |||
332 | /* Fixup boot cpu */ | ||
333 | smp_store_cpu_info(boot_cpuid); | ||
334 | cpu_callin_map[boot_cpuid] = 1; | ||
335 | |||
336 | #ifndef CONFIG_PPC_ISERIES | ||
337 | paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb(); | ||
338 | |||
339 | /* | ||
340 | * Should update do_gtod.stamp_xsec. | ||
341 | * For now we leave it which means the time can be some | ||
342 | * number of msecs off until someone does a settimeofday() | ||
343 | */ | ||
344 | do_gtod.varp->tb_orig_stamp = tb_last_stamp; | ||
345 | systemcfg->tb_orig_stamp = tb_last_stamp; | ||
346 | #endif | ||
347 | |||
348 | max_cpus = smp_ops->probe(); | ||
349 | |||
350 | smp_space_timers(max_cpus); | ||
351 | |||
352 | for_each_cpu(cpu) | ||
353 | if (cpu != boot_cpuid) | ||
354 | smp_create_idle(cpu); | ||
355 | } | ||
356 | |||
357 | void __devinit smp_prepare_boot_cpu(void) | ||
358 | { | ||
359 | BUG_ON(smp_processor_id() != boot_cpuid); | ||
360 | |||
361 | cpu_set(boot_cpuid, cpu_online_map); | ||
362 | |||
363 | paca[boot_cpuid].__current = current; | ||
364 | current_set[boot_cpuid] = current->thread_info; | ||
365 | } | ||
366 | |||
367 | #ifdef CONFIG_HOTPLUG_CPU | ||
368 | /* State of each CPU during hotplug phases */ | ||
369 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | ||
370 | |||
371 | int generic_cpu_disable(void) | ||
372 | { | ||
373 | unsigned int cpu = smp_processor_id(); | ||
374 | |||
375 | if (cpu == boot_cpuid) | ||
376 | return -EBUSY; | ||
377 | |||
378 | systemcfg->processorCount--; | ||
379 | cpu_clear(cpu, cpu_online_map); | ||
380 | fixup_irqs(cpu_online_map); | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | int generic_cpu_enable(unsigned int cpu) | ||
385 | { | ||
386 | /* Do the normal bootup if we haven't | ||
387 | * already bootstrapped. */ | ||
388 | if (system_state != SYSTEM_RUNNING) | ||
389 | return -ENOSYS; | ||
390 | |||
391 | /* get the target out of its holding state */ | ||
392 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
393 | smp_wmb(); | ||
394 | |||
395 | while (!cpu_online(cpu)) | ||
396 | cpu_relax(); | ||
397 | |||
398 | fixup_irqs(cpu_online_map); | ||
399 | /* counter the irq disable in fixup_irqs */ | ||
400 | local_irq_enable(); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | void generic_cpu_die(unsigned int cpu) | ||
405 | { | ||
406 | int i; | ||
407 | |||
408 | for (i = 0; i < 100; i++) { | ||
409 | smp_rmb(); | ||
410 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) | ||
411 | return; | ||
412 | msleep(100); | ||
413 | } | ||
414 | printk(KERN_ERR "CPU%d didn't die...\n", cpu); | ||
415 | } | ||
416 | |||
417 | void generic_mach_cpu_die(void) | ||
418 | { | ||
419 | unsigned int cpu; | ||
420 | |||
421 | local_irq_disable(); | ||
422 | cpu = smp_processor_id(); | ||
423 | printk(KERN_DEBUG "CPU%d offline\n", cpu); | ||
424 | __get_cpu_var(cpu_state) = CPU_DEAD; | ||
425 | smp_wmb(); | ||
426 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) | ||
427 | cpu_relax(); | ||
428 | |||
429 | flush_tlb_pending(); | ||
430 | cpu_set(cpu, cpu_online_map); | ||
431 | local_irq_enable(); | ||
432 | } | ||
433 | #endif | ||
434 | |||
435 | static int __devinit cpu_enable(unsigned int cpu) | ||
436 | { | ||
437 | if (smp_ops->cpu_enable) | ||
438 | return smp_ops->cpu_enable(cpu); | ||
439 | |||
440 | return -ENOSYS; | ||
441 | } | ||
442 | |||
443 | int __devinit __cpu_up(unsigned int cpu) | ||
444 | { | ||
445 | int c; | ||
446 | |||
447 | if (!cpu_enable(cpu)) | ||
448 | return 0; | ||
449 | |||
450 | if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)) | ||
451 | return -EINVAL; | ||
452 | |||
453 | paca[cpu].default_decr = tb_ticks_per_jiffy; | ||
454 | |||
455 | /* Make sure callin-map entry is 0 (can be a leftover from a CPU | ||
456 | * hotplug) | ||
457 | */ | ||
458 | cpu_callin_map[cpu] = 0; | ||
459 | |||
460 | /* The information for processor bringup must | ||
461 | * be written out to main store before we release | ||
462 | * the processor. | ||
463 | */ | ||
464 | smp_mb(); | ||
465 | |||
466 | /* wake up cpus */ | ||
467 | DBG("smp: kicking cpu %d\n", cpu); | ||
468 | smp_ops->kick_cpu(cpu); | ||
469 | |||
470 | /* | ||
471 | * wait to see if the cpu made a callin (is actually up). | ||
472 | * use this value that I found through experimentation. | ||
473 | * -- Cort | ||
474 | */ | ||
475 | if (system_state < SYSTEM_RUNNING) | ||
476 | for (c = 5000; c && !cpu_callin_map[cpu]; c--) | ||
477 | udelay(100); | ||
478 | #ifdef CONFIG_HOTPLUG_CPU | ||
479 | else | ||
480 | /* | ||
481 | * CPUs can take much longer to come up in the | ||
482 | * hotplug case. Wait five seconds. | ||
483 | */ | ||
484 | for (c = 25; c && !cpu_callin_map[cpu]; c--) { | ||
485 | msleep(200); | ||
486 | } | ||
487 | #endif | ||
488 | |||
489 | if (!cpu_callin_map[cpu]) { | ||
490 | printk("Processor %u is stuck.\n", cpu); | ||
491 | return -ENOENT; | ||
492 | } | ||
493 | |||
494 | printk("Processor %u found.\n", cpu); | ||
495 | |||
496 | if (smp_ops->give_timebase) | ||
497 | smp_ops->give_timebase(); | ||
498 | |||
499 | /* Wait until cpu puts itself in the online map */ | ||
500 | while (!cpu_online(cpu)) | ||
501 | cpu_relax(); | ||
502 | |||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | |||
507 | /* Activate a secondary processor. */ | ||
508 | int __devinit start_secondary(void *unused) | ||
509 | { | ||
510 | unsigned int cpu = smp_processor_id(); | ||
511 | |||
512 | atomic_inc(&init_mm.mm_count); | ||
513 | current->active_mm = &init_mm; | ||
514 | |||
515 | smp_store_cpu_info(cpu); | ||
516 | set_dec(paca[cpu].default_decr); | ||
517 | cpu_callin_map[cpu] = 1; | ||
518 | |||
519 | smp_ops->setup_cpu(cpu); | ||
520 | if (smp_ops->take_timebase) | ||
521 | smp_ops->take_timebase(); | ||
522 | |||
523 | spin_lock(&call_lock); | ||
524 | cpu_set(cpu, cpu_online_map); | ||
525 | spin_unlock(&call_lock); | ||
526 | |||
527 | local_irq_enable(); | ||
528 | |||
529 | cpu_idle(); | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | int setup_profiling_timer(unsigned int multiplier) | ||
534 | { | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | void __init smp_cpus_done(unsigned int max_cpus) | ||
539 | { | ||
540 | cpumask_t old_mask; | ||
541 | |||
542 | /* We want the setup_cpu() here to be called from CPU 0, but our | ||
543 | * init thread may have been "borrowed" by another CPU in the meantime, | ||
544 | * so we pin ourselves to CPU 0 for a short while. | ||
545 | */ | ||
546 | old_mask = current->cpus_allowed; | ||
547 | set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid)); | ||
548 | |||
549 | smp_ops->setup_cpu(boot_cpuid); | ||
550 | |||
551 | set_cpus_allowed(current, old_mask); | ||
552 | } | ||
553 | |||
554 | #ifdef CONFIG_HOTPLUG_CPU | ||
555 | int __cpu_disable(void) | ||
556 | { | ||
557 | if (smp_ops->cpu_disable) | ||
558 | return smp_ops->cpu_disable(); | ||
559 | |||
560 | return -ENOSYS; | ||
561 | } | ||
562 | |||
563 | void __cpu_die(unsigned int cpu) | ||
564 | { | ||
565 | if (smp_ops->cpu_die) | ||
566 | smp_ops->cpu_die(cpu); | ||
567 | } | ||
568 | #endif | ||
diff --git a/arch/ppc64/kernel/spider-pic.c b/arch/ppc64/kernel/spider-pic.c deleted file mode 100644 index d5c9a02fb119..000000000000 --- a/arch/ppc64/kernel/spider-pic.c +++ /dev/null | |||
@@ -1,191 +0,0 @@ | |||
1 | /* | ||
2 | * External Interrupt Controller on Spider South Bridge | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/irq.h> | ||
25 | |||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/prom.h> | ||
28 | #include <asm/io.h> | ||
29 | |||
30 | #include "bpa_iic.h" | ||
31 | |||
32 | /* register layout taken from Spider spec, table 7.4-4 */ | ||
33 | enum { | ||
34 | TIR_DEN = 0x004, /* Detection Enable Register */ | ||
35 | TIR_MSK = 0x084, /* Mask Level Register */ | ||
36 | TIR_EDC = 0x0c0, /* Edge Detection Clear Register */ | ||
37 | TIR_PNDA = 0x100, /* Pending Register A */ | ||
38 | TIR_PNDB = 0x104, /* Pending Register B */ | ||
39 | TIR_CS = 0x144, /* Current Status Register */ | ||
40 | TIR_LCSA = 0x150, /* Level Current Status Register A */ | ||
41 | TIR_LCSB = 0x154, /* Level Current Status Register B */ | ||
42 | TIR_LCSC = 0x158, /* Level Current Status Register C */ | ||
43 | TIR_LCSD = 0x15c, /* Level Current Status Register D */ | ||
44 | TIR_CFGA = 0x200, /* Setting Register A0 */ | ||
45 | TIR_CFGB = 0x204, /* Setting Register B0 */ | ||
46 | /* 0x208 ... 0x3ff Setting Register An/Bn */ | ||
47 | TIR_PPNDA = 0x400, /* Packet Pending Register A */ | ||
48 | TIR_PPNDB = 0x404, /* Packet Pending Register B */ | ||
49 | TIR_PIERA = 0x408, /* Packet Output Error Register A */ | ||
50 | TIR_PIERB = 0x40c, /* Packet Output Error Register B */ | ||
51 | TIR_PIEN = 0x444, /* Packet Output Enable Register */ | ||
52 | TIR_PIPND = 0x454, /* Packet Output Pending Register */ | ||
53 | TIRDID = 0x484, /* Spider Device ID Register */ | ||
54 | REISTIM = 0x500, /* Reissue Command Timeout Time Setting */ | ||
55 | REISTIMEN = 0x504, /* Reissue Command Timeout Setting */ | ||
56 | REISWAITEN = 0x508, /* Reissue Wait Control*/ | ||
57 | }; | ||
58 | |||
59 | static void __iomem *spider_pics[4]; | ||
60 | |||
61 | static void __iomem *spider_get_pic(int irq) | ||
62 | { | ||
63 | int node = irq / IIC_NODE_STRIDE; | ||
64 | irq %= IIC_NODE_STRIDE; | ||
65 | |||
66 | if (irq >= IIC_EXT_OFFSET && | ||
67 | irq < IIC_EXT_OFFSET + IIC_NUM_EXT && | ||
68 | spider_pics) | ||
69 | return spider_pics[node]; | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | static int spider_get_nr(unsigned int irq) | ||
74 | { | ||
75 | return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; | ||
76 | } | ||
77 | |||
78 | static void __iomem *spider_get_irq_config(int irq) | ||
79 | { | ||
80 | void __iomem *pic; | ||
81 | pic = spider_get_pic(irq); | ||
82 | return pic + TIR_CFGA + 8 * spider_get_nr(irq); | ||
83 | } | ||
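Each external source therefore owns a pair of 32-bit setting registers, A at TIR_CFGA + 8*n and B four bytes after it, which is exactly the pair spider_enable_irq() writes below. A worked offset (pic_base is a hypothetical node base address):

	void __iomem *cfg_a = pic_base + TIR_CFGA + 8 * 5;	/* source 5: 0x200 + 0x28 = 0x228 */
	void __iomem *cfg_b = cfg_a + 4;			/* its B register at 0x22c */

Both land inside the "0x208 ... 0x3ff Setting Register An/Bn" window noted in the register table above.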
84 | |||
85 | static void spider_enable_irq(unsigned int irq) | ||
86 | { | ||
87 | void __iomem *cfg = spider_get_irq_config(irq); | ||
88 | irq = spider_get_nr(irq); | ||
89 | |||
90 | out_be32(cfg, in_be32(cfg) | 0x3107000eu); | ||
91 | out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq); | ||
92 | } | ||
93 | |||
94 | static void spider_disable_irq(unsigned int irq) | ||
95 | { | ||
96 | void __iomem *cfg = spider_get_irq_config(irq); | ||
97 | irq = spider_get_nr(irq); | ||
98 | |||
99 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | ||
100 | } | ||
101 | |||
102 | static unsigned int spider_startup_irq(unsigned int irq) | ||
103 | { | ||
104 | spider_enable_irq(irq); | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static void spider_shutdown_irq(unsigned int irq) | ||
109 | { | ||
110 | spider_disable_irq(irq); | ||
111 | } | ||
112 | |||
113 | static void spider_end_irq(unsigned int irq) | ||
114 | { | ||
115 | spider_enable_irq(irq); | ||
116 | } | ||
117 | |||
118 | static void spider_ack_irq(unsigned int irq) | ||
119 | { | ||
120 | spider_disable_irq(irq); | ||
121 | iic_local_enable(); | ||
122 | } | ||
123 | |||
124 | static struct hw_interrupt_type spider_pic = { | ||
125 | .typename = " SPIDER ", | ||
126 | .startup = spider_startup_irq, | ||
127 | .shutdown = spider_shutdown_irq, | ||
128 | .enable = spider_enable_irq, | ||
129 | .disable = spider_disable_irq, | ||
130 | .ack = spider_ack_irq, | ||
131 | .end = spider_end_irq, | ||
132 | }; | ||
133 | |||
134 | |||
135 | int spider_get_irq(unsigned long int_pending) | ||
136 | { | ||
137 | void __iomem *regs = spider_get_pic(int_pending); | ||
138 | unsigned long cs; | ||
139 | int irq; | ||
140 | |||
141 | cs = in_be32(regs + TIR_CS); | ||
142 | |||
143 | irq = cs >> 24; | ||
144 | if (irq != 63) | ||
145 | return irq; | ||
146 | |||
147 | return -1; | ||
148 | } | ||
149 | |||
150 | void spider_init_IRQ(void) | ||
151 | { | ||
152 | int node; | ||
153 | struct device_node *dn; | ||
154 | unsigned int *property; | ||
155 | long spiderpic; | ||
156 | int n; | ||
157 | |||
158 | /* FIXME: detect multiple PICs as soon as the device tree has them */ | ||
159 | for (node = 0; node < 1; node++) { | ||
160 | dn = of_find_node_by_path("/"); | ||
161 | n = prom_n_addr_cells(dn); | ||
162 | property = (unsigned int *) get_property(dn, | ||
163 | "platform-spider-pic", NULL); | ||
164 | |||
165 | if (!property) | ||
166 | continue; | ||
167 | for (spiderpic = 0; n > 0; --n) | ||
168 | spiderpic = (spiderpic << 32) + *property++; | ||
169 | printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic); | ||
170 | spider_pics[node] = __ioremap(spiderpic, 0x800, _PAGE_NO_CACHE); | ||
171 | for (n = 0; n < IIC_NUM_EXT; n++) { | ||
172 | int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; | ||
173 | get_irq_desc(irq)->handler = &spider_pic; | ||
174 | |||
175 | /* do not mask any interrupts because of level */ | ||
176 | out_be32(spider_pics[node] + TIR_MSK, 0x0); | ||
177 | |||
178 | /* disable edge detection clear */ | ||
179 | /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ | ||
180 | |||
181 | /* enable interrupt packets to be output */ | ||
182 | out_be32(spider_pics[node] + TIR_PIEN, | ||
183 | in_be32(spider_pics[node] + TIR_PIEN) | 0x1); | ||
184 | |||
185 | /* Enable the interrupt detection enable bit. Do this last! */ | ||
186 | out_be32(spider_pics[node] + TIR_DEN, | ||
187 | in_be32(spider_pics[node] +TIR_DEN) | 0x1); | ||
188 | |||
189 | } | ||
190 | } | ||
191 | } | ||
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c index 6654b350979c..e99ec62c2c52 100644 --- a/arch/ppc64/kernel/sysfs.c +++ b/arch/ppc64/kernel/sysfs.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/paca.h> | 20 | #include <asm/paca.h> |
21 | #include <asm/lppaca.h> | 21 | #include <asm/lppaca.h> |
22 | #include <asm/machdep.h> | 22 | #include <asm/machdep.h> |
23 | #include <asm/smp.h> | ||
23 | 24 | ||
24 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 25 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
25 | 26 | ||
diff --git a/arch/ppc64/kernel/udbg_scc.c b/arch/ppc64/kernel/udbg_scc.c index c47fd6c63531..820c53551507 100644 --- a/arch/ppc64/kernel/udbg_scc.c +++ b/arch/ppc64/kernel/udbg_scc.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <asm/udbg.h> | 13 | #include <asm/udbg.h> |
14 | #include <asm/processor.h> | 14 | #include <asm/processor.h> |
15 | #include <asm/naca.h> | ||
16 | #include <asm/io.h> | 15 | #include <asm/io.h> |
17 | #include <asm/prom.h> | 16 | #include <asm/prom.h> |
18 | #include <asm/pmac_feature.h> | 17 | #include <asm/pmac_feature.h> |