author     Paul Mackerras <paulus@samba.org>   2005-11-14 01:30:17 -0500
committer  Paul Mackerras <paulus@samba.org>   2005-11-14 01:30:17 -0500
commit     7568cb4ef6c507164b65b01f972a3bd026898ae1 (patch)
tree       ba608f4c84d8765d5a1491c345f6dc265b5ec4ea /arch/ppc64
parent     c55377ee73f6efeb373ae06f6e918d87660b4852 (diff)
powerpc: Move most remaining ppc64 files over to arch/powerpc
Also deletes files in arch/ppc64 that are no longer used now that we
don't compile with ARCH=ppc64 any more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/Kconfig                     520
-rw-r--r--  arch/ppc64/kernel/Makefile              40
-rw-r--r--  arch/ppc64/kernel/asm-offsets.c        195
-rw-r--r--  arch/ppc64/kernel/btext.c              792
-rw-r--r--  arch/ppc64/kernel/dma.c                151
-rw-r--r--  arch/ppc64/kernel/head.S              2007
-rw-r--r--  arch/ppc64/kernel/hvconsole.c           74
-rw-r--r--  arch/ppc64/kernel/hvcserver.c          251
-rw-r--r--  arch/ppc64/kernel/iomap.c              146
-rw-r--r--  arch/ppc64/kernel/iommu.c              572
-rw-r--r--  arch/ppc64/kernel/kprobes.c            459
-rw-r--r--  arch/ppc64/kernel/machine_kexec.c      358
-rw-r--r--  arch/ppc64/kernel/misc.S               940
-rw-r--r--  arch/ppc64/kernel/module.c             455
-rw-r--r--  arch/ppc64/kernel/pci.c               1319
-rw-r--r--  arch/ppc64/kernel/pci_direct_iommu.c    94
-rw-r--r--  arch/ppc64/kernel/pci_dn.c             230
-rw-r--r--  arch/ppc64/kernel/pci_iommu.c          128
-rw-r--r--  arch/ppc64/kernel/ppc_ksyms.c           76
-rw-r--r--  arch/ppc64/kernel/prom.c              1956
-rw-r--r--  arch/ppc64/kernel/prom_init.c         2051
-rw-r--r--  arch/ppc64/kernel/semaphore.c          136
-rw-r--r--  arch/ppc64/kernel/vdso.c               625
-rw-r--r--  arch/ppc64/kernel/vmlinux.lds.S        151
-rw-r--r--  arch/ppc64/xmon/privinst.h              64
25 files changed, 1 insertion(+), 13789 deletions(-)
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
deleted file mode 100644
index 9d10c12e87fe..000000000000
--- a/arch/ppc64/Kconfig
+++ /dev/null
@@ -1,520 +0,0 @@
1#
2# For a description of the syntax of this configuration file,
3# see Documentation/kbuild/kconfig-language.txt.
4#
5
6config 64BIT
7 def_bool y
8
9config MMU
10 bool
11 default y
12
13config PPC_STD_MMU
14 def_bool y
15
16config UID16
17 bool
18
19config RWSEM_GENERIC_SPINLOCK
20 bool
21
22config RWSEM_XCHGADD_ALGORITHM
23 bool
24 default y
25
26config GENERIC_CALIBRATE_DELAY
27 bool
28 default y
29
30config GENERIC_ISA_DMA
31 bool
32 default y
33
34config EARLY_PRINTK
35 bool
36 default y
37
38config COMPAT
39 bool
40 default y
41
42config SCHED_NO_NO_OMIT_FRAME_POINTER
43 bool
44 default y
45
46config ARCH_MAY_HAVE_PC_FDC
47 bool
48 default y
49
50config PPC_STD_MMU
51 bool
52 default y
53
54# We optimistically allocate largepages from the VM, so make the limit
55# large enough (16MB). This badly named config option is actually
56# max order + 1
57config FORCE_MAX_ZONEORDER
58 int
59 default "9" if PPC_64K_PAGES
60 default "13"
61
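# A worked check of the comment above: the largest buddy allocation is
# 2^(FORCE_MAX_ZONEORDER - 1) pages, so both defaults give the same ceiling:
#   64k pages: 2^(9-1)  * 64k = 16MB
#   4k pages:  2^(13-1) * 4k  = 16MB
# which matches the 16MB largepage limit mentioned above.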
62source "init/Kconfig"
63
64config SYSVIPC_COMPAT
65 bool
66 depends on COMPAT && SYSVIPC
67 default y
68
69menu "Platform support"
70
71choice
72 prompt "Platform Type"
73 default PPC_MULTIPLATFORM
74
75config PPC_ISERIES
76 bool "IBM Legacy iSeries"
77
78config PPC_MULTIPLATFORM
79 bool "Generic"
80
81endchoice
82
83config PPC_PSERIES
84 depends on PPC_MULTIPLATFORM
85 bool " IBM pSeries & new iSeries"
86 default y
87
88config PPC_BPA
89 bool " Broadband Processor Architecture"
90 depends on PPC_MULTIPLATFORM
91
92config PPC_PMAC
93 depends on PPC_MULTIPLATFORM
94 bool " Apple G5 based machines"
95 default y
96 select U3_DART
97 select GENERIC_TBSYNC
98
99config PPC_MAPLE
100 depends on PPC_MULTIPLATFORM
101 bool " Maple 970FX Evaluation Board"
102 select U3_DART
103 select MPIC_BROKEN_U3
104 select GENERIC_TBSYNC
105 default n
106 help
107 This option enables support for the Maple 970FX Evaluation Board.
108 For more information, refer to <http://www.970eval.com>.
109
110config PPC
111 bool
112 default y
113
114config PPC64
115 bool
116 default y
117
118config PPC_OF
119 depends on PPC_MULTIPLATFORM
120 bool
121 default y
122
123config XICS
124 depends on PPC_PSERIES
125 bool
126 default y
127
128config MPIC
129 depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
130 bool
131 default y
132
133config PPC_I8259
134 depends on PPC_PSERIES
135 bool
136 default y
137
138config BPA_IIC
139 depends on PPC_BPA
140 bool
141 default y
142
143# VMX is pSeries only for now until somebody writes the iSeries
144# exception vectors for it
145config ALTIVEC
146 bool "Support for VMX (Altivec) vector unit"
147 depends on PPC_MULTIPLATFORM
148 default y
149
150config PPC_SPLPAR
151 depends on PPC_PSERIES
152 bool "Support for shared-processor logical partitions"
153 default n
154 help
155 Enabling this option will make the kernel run more efficiently
156 on logically-partitioned pSeries systems which use shared
157 processors, that is, which share physical processors between
158 two or more partitions.
159
160config KEXEC
161 bool "kexec system call (EXPERIMENTAL)"
162 depends on PPC_MULTIPLATFORM && EXPERIMENTAL
163 help
164 kexec is a system call that implements the ability to shut down your
165 current kernel and start another kernel. It is like a reboot,
166 but it is independent of the system firmware. And like a reboot,
167 you can start any kernel with it, not just Linux.
168
169 The name comes from the similarity to the exec system call.
170
171 It is an ongoing process to be certain the hardware in a machine
172 is properly shut down, so do not be surprised if this code does not
173 initially work for you. It may help to enable device hotplugging
174 support. As of this writing the exact hardware interface is
175 strongly in flux, so no good recommendation can be made.
176
177source "drivers/cpufreq/Kconfig"
178
179config CPU_FREQ_PMAC64
180 bool "Support for some Apple G5s"
181 depends on CPU_FREQ && PMAC_SMU && PPC64
182 select CPU_FREQ_TABLE
183 help
184 This adds support for frequency switching on Apple iMac G5,
185 and some of the more recent desktop G5 machines as well.
186
187config IBMVIO
188 depends on PPC_PSERIES || PPC_ISERIES
189 bool
190 default y
191
192config U3_DART
193 bool
194 depends on PPC_MULTIPLATFORM
195 default n
196
197config MPIC_BROKEN_U3
198 bool
199 depends on PPC_MAPLE
200 default y
201
202config GENERIC_TBSYNC
203 def_bool n
204
205config PPC_PMAC64
206 bool
207 depends on PPC_PMAC
208 default y
209
210config BOOTX_TEXT
211 bool "Support for early boot text console"
212 depends on PPC_OF
213 help
214 Say Y here to see progress messages from the boot firmware in text
215 mode. Requires an Open Firmware compatible video card.
216
217config POWER4
218 def_bool y
219
220config PPC_FPU
221 def_bool y
222
223config POWER4_ONLY
224 bool "Optimize for POWER4"
225 default n
226 ---help---
227 Cause the compiler to optimize for POWER4 processors. The resulting
228 binary will not work on POWER3 or RS64 processors when compiled with
229 binutils 2.15 or later.
230
231config IOMMU_VMERGE
232 bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
233 depends on EXPERIMENTAL
234 default n
235 help
236 Cause IO segments sent to a device for DMA to be merged virtually
237 by the IOMMU when they happen to have been allocated contiguously.
238 This doesn't add pressure to the IOMMU allocator. However, some
239 drivers don't support getting large merged segments coming back
240 from *_map_sg(). Say Y if you know the drivers you are using are
241 properly handling this case.
242
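To illustrate what the help text above means for a driver, here is a minimal
sketch (hypothetical driver code, not part of this patch; queue_dma and
hw_queue_segment are made-up names): with IOMMU_VMERGE enabled, pci_map_sg()
may return fewer segments than it was given, and each returned segment may
span many pages, so the hardware must be programmed from the returned count
and sg_dma_len() only:

    static int queue_dma(struct pci_dev *pdev, struct scatterlist *sg, int nents)
    {
            int i, n;

            /* n can be smaller than nents if the IOMMU merged segments */
            n = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
            for (i = 0; i < n; i++)
                    /* hw_queue_segment() is a hypothetical driver helper;
                     * sg_dma_len() here may exceed one page */
                    hw_queue_segment(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
            return n;
    }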
243config SMP
244 bool "Symmetric multi-processing support"
245 ---help---
246 This enables support for systems with more than one CPU. If you have
247 a system with only one CPU, say N. If you have a system with more
248 than one CPU, say Y.
249
250 If you say N here, the kernel will run on single and multiprocessor
251 machines, but will use only one CPU of a multiprocessor machine. If
252 you say Y here, the kernel will still run on single-processor machines.
253 On a single-processor machine, the kernel will run faster if you say
254 N here.
255
256 If you don't know what to do here, say Y.
257
258config NR_CPUS
259 int "Maximum number of CPUs (2-128)"
260 range 2 128
261 depends on SMP
262 default "32"
263
264config HMT
265 bool "Hardware multithreading"
266 depends on SMP && PPC_PSERIES && BROKEN
267 help
268 This option enables hardware multithreading on RS64 CPUs.
269 The pSeries p620 and p660 systems have this CPU type.
270
271config NUMA
272 bool "NUMA support"
273 default y if SMP && PPC_PSERIES
274
275config ARCH_SELECT_MEMORY_MODEL
276 def_bool y
277
278config ARCH_FLATMEM_ENABLE
279 def_bool y
280 depends on !NUMA
281
282config ARCH_SPARSEMEM_ENABLE
283 def_bool y
284
285config ARCH_SPARSEMEM_DEFAULT
286 def_bool y
287 depends on NUMA
288
289source "mm/Kconfig"
290
291config HAVE_ARCH_EARLY_PFN_TO_NID
292 def_bool y
293 depends on NEED_MULTIPLE_NODES
294
295config ARCH_MEMORY_PROBE
296 def_bool y
297 depends on MEMORY_HOTPLUG
298
299# Some NUMA nodes have memory ranges that span
300# other nodes. Even though a pfn is valid and
301# between a node's start and end pfns, it may not
302# reside on that node.
303#
304# This is a relatively temporary hack that should
305# be able to go away when sparsemem is fully in
306# place
307config NODES_SPAN_OTHER_NODES
308 def_bool y
309 depends on NEED_MULTIPLE_NODES
310
311config PPC_64K_PAGES
312 bool "64k page size"
313 help
314 This option changes the kernel logical page size to 64k. On machines
315 without processor support for 64k pages, the kernel will simulate
316 them by loading each individual 4k page on demand transparently,
317 while on hardware with such support, it will be used to map
318 normal application pages.
319
320config SCHED_SMT
321 bool "SMT (Hyperthreading) scheduler support"
322 depends on SMP
323 default n
324 help
325 SMT scheduler support improves the CPU scheduler's decision making
326 when dealing with POWER5 cpus at a cost of slightly increased
327 overhead in some places. If unsure say N here.
328
329source "kernel/Kconfig.preempt"
330source "kernel/Kconfig.hz"
331
332config EEH
333 bool "PCI Extended Error Handling (EEH)" if EMBEDDED
334 depends on PPC_PSERIES
335 default y if !EMBEDDED
336
337#
338# Use the generic interrupt handling code in kernel/irq/:
339#
340config GENERIC_HARDIRQS
341 bool
342 default y
343
344config PPC_RTAS
345 bool
346 depends on PPC_PSERIES || PPC_BPA
347 default y
348
349config RTAS_ERROR_LOGGING
350 bool
351 depends on PPC_RTAS
352 default y
353
354config RTAS_PROC
355 bool "Proc interface to RTAS"
356 depends on PPC_RTAS
357 default y
358
359config RTAS_FLASH
360 tristate "Firmware flash interface"
361 depends on RTAS_PROC
362
363config SCANLOG
364 tristate "Scanlog dump interface"
365 depends on RTAS_PROC && PPC_PSERIES
366
367config LPARCFG
368 tristate "LPAR Configuration Data"
369 depends on PPC_PSERIES || PPC_ISERIES
370 help
371 Provide system capacity information via human-readable
372 <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
373
374config SECCOMP
375 bool "Enable seccomp to safely compute untrusted bytecode"
376 depends on PROC_FS
377 default y
378 help
379 This kernel feature is useful for number crunching applications
380 that may need to compute untrusted bytecode during their
381 execution. By using pipes or other transports made available to
382 the process as file descriptors supporting the read/write
383 syscalls, it's possible to isolate those applications in
384 their own address space using seccomp. Once seccomp is
385 enabled via /proc/<pid>/seccomp, it cannot be disabled
386 and the task is only allowed to execute a few safe syscalls
387 defined by each seccomp mode.
388
389 If unsure, say Y. Only embedded systems should say N here.
390
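As a concrete sketch of the interface described above (illustrative userspace
code, not part of this patch): writing "1" to /proc/<pid>/seccomp switches the
task into the strict mode where only read, write, exit and sigreturn remain
available, so even close() afterwards would be fatal:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            int fd = open("/proc/self/seccomp", O_WRONLY);

            if (fd < 0 || write(fd, "1", 1) != 1)
                    return 1;
            /* from here on only read/write/exit/sigreturn are permitted */
            write(1, "sandboxed\n", 10);
            syscall(SYS_exit, 0);   /* raw exit; close(fd) would now be fatal */
    }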
391source "fs/Kconfig.binfmt"
392
393config HOTPLUG_CPU
394 bool "Support for hot-pluggable CPUs"
395 depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
396 select HOTPLUG
397 ---help---
398 Say Y here to be able to turn CPUs off and on.
399
400 Say N if you are unsure.
401
402config PROC_DEVICETREE
403 bool "Support for Open Firmware device tree in /proc"
404 help
405 This option adds a device-tree directory under /proc which contains
406 an image of the device tree that the kernel copies from Open
407 Firmware. If unsure, say Y here.
408
409config CMDLINE_BOOL
410 bool "Default bootloader kernel arguments"
411 depends on !PPC_ISERIES
412
413config CMDLINE
414 string "Initial kernel command string"
415 depends on CMDLINE_BOOL
416 default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
417 help
418 On some platforms, there is currently no way for the boot loader to
419 pass arguments to the kernel. For these platforms, you can supply
420 some command-line options at build time by entering them here. In
421 most cases you will need to specify the root device here.
422
423endmenu
424
425config ISA_DMA_API
426 bool
427 default y
428
429menu "Bus Options"
430
431config ISA
432 bool
433 help
434 Find out whether you have ISA slots on your motherboard. ISA is the
435 name of a bus system, i.e. the way the CPU talks to the other stuff
436 inside your box. If you have an Apple machine, say N here; if you
437 have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
438 you have an embedded board, consult your board documentation.
439
440config SBUS
441 bool
442
443config MCA
444 bool
445
446config EISA
447 bool
448
449config PCI
450 bool "support for PCI devices" if (EMBEDDED && PPC_ISERIES)
451 default y
452 help
453 Find out whether your system includes a PCI bus. PCI is the name of
454 a bus system, i.e. the way the CPU talks to the other stuff inside
455 your box. If you say Y here, the kernel will include drivers and
456 infrastructure code to support PCI bus devices.
457
458config PCI_DOMAINS
459 bool
460 default PCI
461
462source "drivers/pci/Kconfig"
463
464source "drivers/pcmcia/Kconfig"
465
466source "drivers/pci/hotplug/Kconfig"
467
468endmenu
469
470source "net/Kconfig"
471
472source "drivers/Kconfig"
473
474source "fs/Kconfig"
475
476menu "iSeries device drivers"
477 depends on PPC_ISERIES
478
479config VIOCONS
480 tristate "iSeries Virtual Console Support"
481
482config VIODASD
483 tristate "iSeries Virtual I/O disk support"
484 help
485 If you are running on an iSeries system and you want to use
486 virtual disks created and managed by OS/400, say Y.
487
488config VIOCD
489 tristate "iSeries Virtual I/O CD support"
490 help
491 If you are running Linux on an IBM iSeries system and you want to
492 read a CD drive owned by OS/400, say Y here.
493
494config VIOTAPE
495 tristate "iSeries Virtual Tape Support"
496 help
497 If you are running Linux on an iSeries system and you want Linux
498 to read and/or write a tape drive owned by OS/400, say Y here.
499
500endmenu
501
502config VIOPATH
503 bool
504 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
505 default y
506
507source "arch/powerpc/oprofile/Kconfig"
508
509source "arch/ppc64/Kconfig.debug"
510
511source "security/Kconfig"
512
513config KEYS_COMPAT
514 bool
515 depends on COMPAT && KEYS
516 default y
517
518source "crypto/Kconfig"
519
520source "lib/Kconfig"
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index d0edea503c49..e876c213f5ce 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -2,44 +2,6 @@
 # Makefile for the linux ppc64 kernel.
 #
 
-ifneq ($(CONFIG_PPC_MERGE),y)
-
-EXTRA_CFLAGS += -mno-minimal-toc
-extra-y := head.o vmlinux.lds
-
-obj-y := misc.o prom.o
-
-endif
-
-obj-y += idle.o dma.o \
-	align.o \
-	iommu.o
-
-pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
-
-obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
+obj-y += idle.o align.o
 
 obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
-endif
-
-obj-$(CONFIG_KEXEC) += machine_kexec.o
-obj-$(CONFIG_MODULES) += module.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_MODULES) += ppc_ksyms.o
-endif
-obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
-ifneq ($(CONFIG_PPC_MERGE),y)
-obj-$(CONFIG_BOOTX_TEXT) += btext.o
-endif
-obj-$(CONFIG_HVCS) += hvcserver.o
-
-obj-$(CONFIG_KPROBES) += kprobes.o
-
-ifneq ($(CONFIG_PPC_MERGE),y)
-ifeq ($(CONFIG_PPC_ISERIES),y)
-arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
-AFLAGS_head.o += -Iarch/powerpc/kernel
-endif
-endif
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
deleted file mode 100644
index 84ab5c18ef52..000000000000
--- a/arch/ppc64/kernel/asm-offsets.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/mman.h>
24#include <linux/mm.h>
25#include <linux/time.h>
26#include <linux/hardirq.h>
27#include <asm/io.h>
28#include <asm/page.h>
29#include <asm/pgtable.h>
30#include <asm/processor.h>
31
32#include <asm/paca.h>
33#include <asm/lppaca.h>
34#include <asm/iseries/hv_lp_event.h>
35#include <asm/rtas.h>
36#include <asm/cputable.h>
37#include <asm/cache.h>
38#include <asm/systemcfg.h>
39#include <asm/compat.h>
40
41#define DEFINE(sym, val) \
42 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
43
44#define BLANK() asm volatile("\n->" : : )
45
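/* Sketch of the mechanism the top-of-file comment describes: compiling
 * this file with gcc -S turns, e.g.,
 *	DEFINE(THREAD, offsetof(struct task_struct, thread));
 * into an assembly line of the form
 *	->THREAD 1234 offsetof(struct task_struct, thread)
 * which the build system then extracts into
 *	#define THREAD 1234
 * in a generated header, so .S files can use the offsets symbolically.
 * (The 1234 is illustrative; the real value comes from the compiler.) */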
46int main(void)
47{
48 /* thread struct on stack */
49 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
50 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
51 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
52
53 /* task_struct->thread */
54 DEFINE(THREAD, offsetof(struct task_struct, thread));
55 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
56 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
57 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
58 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
59 DEFINE(KSP, offsetof(struct thread_struct, ksp));
60 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
61
62#ifdef CONFIG_ALTIVEC
63 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
64 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
65 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
66 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
67#endif /* CONFIG_ALTIVEC */
68 DEFINE(MM, offsetof(struct task_struct, mm));
69 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
70
71 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
72 DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
73 DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
74 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
75 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
76 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
77 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
78
79 /* paca */
80 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
81 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
82 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
83 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
84 DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
85 DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
86 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
87 DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
88 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
89 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
90 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
91 DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
92 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
93 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
94 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
95#ifdef CONFIG_PPC_64K_PAGES
96 DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
97#endif
98#ifdef CONFIG_HUGETLB_PAGE
99 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
100 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
101#endif /* CONFIG_HUGETLB_PAGE */
102 DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
103 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
104 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
105 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
106 DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
107 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
108 DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
109 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
110 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
111 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
112 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
113 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
114
115 /* RTAS */
116 DEFINE(RTASBASE, offsetof(struct rtas_t, base));
117 DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
118
119 /* Interrupt register frame */
120 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
121
122 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
123
124 /* 288 = # of volatile regs, int & fp, for leaf routines */
125 /* which do not stack a frame. See the PPC64 ABI. */
126 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
127 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
128 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
129 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
130 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
131 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
132 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
133 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
134 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
135 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
136 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
137 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
138 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
139 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
140 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
141 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
142 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
143 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
144 /*
145 * Note: these symbols include _ because they overlap with special
146 * register names
147 */
148 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
149 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
150 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
151 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
152 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
153 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
154 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
155 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
156 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
157 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
158 DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
159 DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
160
161 /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
162 DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
163 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
164
165 DEFINE(CLONE_VM, CLONE_VM);
166 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
167
168 /* About the CPU features table */
169 DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
170 DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
171 DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
172 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
173 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
174
175 /* systemcfg offsets for use by vdso */
176 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
177 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
178 DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
179 DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
180 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
181 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
182 DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
183 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
184 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
185
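	/* These offsets let the vdso's assembly read systemcfg fields
	 * without hard-coding the structure layout, e.g. (illustrative):
	 *	ld	r5,CFG_TB_ORIG_STAMP(r3)
	 * so a change to struct systemcfg only needs the offsets rebuilt. */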
186 /* timeval/timezone offsets for use by vdso */
187 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
188 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
189 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
190 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
191 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
192 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
193
194 return 0;
195}
diff --git a/arch/ppc64/kernel/btext.c b/arch/ppc64/kernel/btext.c
deleted file mode 100644
index 506a37885c5c..000000000000
--- a/arch/ppc64/kernel/btext.c
+++ /dev/null
@@ -1,792 +0,0 @@
1/*
2 * Procedures for drawing on the screen early on in the boot process.
3 *
4 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 */
6#include <linux/config.h>
7#include <linux/kernel.h>
8#include <linux/string.h>
9#include <linux/init.h>
10
11#include <asm/sections.h>
12#include <asm/prom.h>
13#include <asm/btext.h>
14#include <asm/prom.h>
15#include <asm/page.h>
16#include <asm/mmu.h>
17#include <asm/pgtable.h>
18#include <asm/io.h>
19#include <asm/lmb.h>
20#include <asm/processor.h>
21#include <asm/udbg.h>
22
23#undef NO_SCROLL
24
25#ifndef NO_SCROLL
26static void scrollscreen(void);
27#endif
28
29static void draw_byte(unsigned char c, long locX, long locY);
30static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
31static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
32static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
33
34static int g_loc_X;
35static int g_loc_Y;
36static int g_max_loc_X;
37static int g_max_loc_Y;
38
39static int dispDeviceRowBytes;
40static int dispDeviceDepth;
41static int dispDeviceRect[4];
42static unsigned char *dispDeviceBase, *logicalDisplayBase;
43
44unsigned long disp_BAT[2] __initdata = {0, 0};
45
46#define cmapsz (16*256)
47
48static unsigned char vga_font[cmapsz];
49
50int boot_text_mapped;
51int force_printk_to_btext = 0;
52
53
54/* Here's a small text engine to use during early boot
55 * or for debugging purposes
56 *
57 * todo:
58 *
59 * - build some kind of vgacon with it to enable early printk
60 * - move to a separate file
61 * - add a few video driver hooks to keep in sync with display
62 * changes.
63 */
64
65void map_boot_text(void)
66{
67 unsigned long base, offset, size;
68 unsigned char *vbase;
69
70 /* By default, we are no longer mapped */
71 boot_text_mapped = 0;
72 if (dispDeviceBase == 0)
73 return;
74 base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
75 offset = ((unsigned long) dispDeviceBase) - base;
76 size = dispDeviceRowBytes * dispDeviceRect[3] + offset
77 + dispDeviceRect[0];
78 vbase = __ioremap(base, size, _PAGE_NO_CACHE);
79 if (vbase == 0)
80 return;
81 logicalDisplayBase = vbase + offset;
82 boot_text_mapped = 1;
83}
84
85int btext_initialize(struct device_node *np)
86{
87 unsigned int width, height, depth, pitch;
88 unsigned long address = 0;
89 u32 *prop;
90
91 prop = (u32 *)get_property(np, "width", NULL);
92 if (prop == NULL)
93 return -EINVAL;
94 width = *prop;
95 prop = (u32 *)get_property(np, "height", NULL);
96 if (prop == NULL)
97 return -EINVAL;
98 height = *prop;
99 prop = (u32 *)get_property(np, "depth", NULL);
100 if (prop == NULL)
101 return -EINVAL;
102 depth = *prop;
103 pitch = width * ((depth + 7) / 8);
104 prop = (u32 *)get_property(np, "linebytes", NULL);
105 if (prop)
106 pitch = *prop;
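	/* a linebytes value of 1 looks like bogus firmware info;
	 * fall back to 4k bytes per row below */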
107 if (pitch == 1)
108 pitch = 0x1000;
109 prop = (u32 *)get_property(np, "address", NULL);
110 if (prop)
111 address = *prop;
112
113 /* FIXME: Add support for PCI reg properties */
114
115 if (address == 0)
116 return -EINVAL;
117
118 g_loc_X = 0;
119 g_loc_Y = 0;
120 g_max_loc_X = width / 8;
121 g_max_loc_Y = height / 16;
122 logicalDisplayBase = (unsigned char *)address;
123 dispDeviceBase = (unsigned char *)address;
124 dispDeviceRowBytes = pitch;
125 dispDeviceDepth = depth;
126 dispDeviceRect[0] = dispDeviceRect[1] = 0;
127 dispDeviceRect[2] = width;
128 dispDeviceRect[3] = height;
129
130 map_boot_text();
131
132 return 0;
133}
134
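/* Sketch of the OF display node this parses (illustrative values only):
 *	width     = 1280
 *	height    = 1024
 *	depth     = 8
 *	linebytes = 1280
 *	address   = 0x9c008000
 * giving a 160x64 character text screen (width/8 by height/16). */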
135static void btext_putc(unsigned char c)
136{
137 btext_drawchar(c);
138}
139
140void __init init_boot_display(void)
141{
142 char *name;
143 struct device_node *np = NULL;
144 int rc = -ENODEV;
145
146 printk("trying to initialize btext ...\n");
147
148 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
149 if (name != NULL) {
150 np = of_find_node_by_path(name);
151 if (np != NULL) {
152 if (strcmp(np->type, "display") != 0) {
153 printk("boot stdout isn't a display !\n");
154 of_node_put(np);
155 np = NULL;
156 }
157 }
158 }
159 if (np)
160 rc = btext_initialize(np);
161 if (rc) {
162 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
163 if (get_property(np, "linux,opened", NULL)) {
164 printk("trying %s ...\n", np->full_name);
165 rc = btext_initialize(np);
166 printk("result: %d\n", rc);
167 }
168 if (rc == 0)
169 break;
170 }
171 }
172 if (rc == 0 && udbg_putc == NULL)
173 udbg_putc = btext_putc;
174}
175
176
177/* Calc the base address of a given point (x,y) */
178static unsigned char * calc_base(int x, int y)
179{
180 unsigned char *base;
181
182 base = logicalDisplayBase;
183 if (base == 0)
184 base = dispDeviceBase;
185 base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
186 base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
187 return base;
188}
189
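/* Worked example in the illustrative 1280x1024x8 mode sketched above:
 * calc_base(8, 16) = base + 8 * 1 + 16 * 1280 = base + 20488,
 * i.e. one glyph cell right and one glyph cell down from the origin. */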
190/* Adjust the display to a new resolution */
191void btext_update_display(unsigned long phys, int width, int height,
192 int depth, int pitch)
193{
194 if (dispDeviceBase == 0)
195 return;
196
197 /* check it's the same frame buffer (within 256MB) */
198 if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
199 return;
200
201 dispDeviceBase = (__u8 *) phys;
202 dispDeviceRect[0] = 0;
203 dispDeviceRect[1] = 0;
204 dispDeviceRect[2] = width;
205 dispDeviceRect[3] = height;
206 dispDeviceDepth = depth;
207 dispDeviceRowBytes = pitch;
208 if (boot_text_mapped) {
209 iounmap(logicalDisplayBase);
210 boot_text_mapped = 0;
211 }
212 map_boot_text();
213 g_loc_X = 0;
214 g_loc_Y = 0;
215 g_max_loc_X = width / 8;
216 g_max_loc_Y = height / 16;
217}
218
219void btext_clearscreen(void)
220{
221 unsigned long *base = (unsigned long *)calc_base(0, 0);
222 unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
223 (dispDeviceDepth >> 3)) >> 3;
224 int i,j;
225
226 for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
227 {
228 unsigned long *ptr = base;
229 for(j=width; j; --j)
230 *(ptr++) = 0;
231 base += (dispDeviceRowBytes >> 3);
232 }
233}
234
235#ifndef NO_SCROLL
236static void scrollscreen(void)
237{
238 unsigned long *src = (unsigned long *)calc_base(0,16);
239 unsigned long *dst = (unsigned long *)calc_base(0,0);
240 unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
241 (dispDeviceDepth >> 3)) >> 3;
242 int i,j;
243
244 for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
245 {
246 unsigned long *src_ptr = src;
247 unsigned long *dst_ptr = dst;
248 for(j=width; j; --j)
249 *(dst_ptr++) = *(src_ptr++);
250 src += (dispDeviceRowBytes >> 3);
251 dst += (dispDeviceRowBytes >> 3);
252 }
253 for (i=0; i<16; i++)
254 {
255 unsigned long *dst_ptr = dst;
256 for(j=width; j; --j)
257 *(dst_ptr++) = 0;
258 dst += (dispDeviceRowBytes >> 3);
259 }
260}
261#endif /* ndef NO_SCROLL */
262
263void btext_drawchar(char c)
264{
265 int cline = 0;
266#ifdef NO_SCROLL
267 int x;
268#endif
269 if (!boot_text_mapped)
270 return;
271
272 switch (c) {
273 case '\b':
274 if (g_loc_X > 0)
275 --g_loc_X;
276 break;
277 case '\t':
278 g_loc_X = (g_loc_X & -8) + 8;
279 break;
280 case '\r':
281 g_loc_X = 0;
282 break;
283 case '\n':
284 g_loc_X = 0;
285 g_loc_Y++;
286 cline = 1;
287 break;
288 default:
289 draw_byte(c, g_loc_X++, g_loc_Y);
290 }
291 if (g_loc_X >= g_max_loc_X) {
292 g_loc_X = 0;
293 g_loc_Y++;
294 cline = 1;
295 }
296#ifndef NO_SCROLL
297 while (g_loc_Y >= g_max_loc_Y) {
298 scrollscreen();
299 g_loc_Y--;
300 }
301#else
302 /* wrap around from bottom to top of screen so we don't
303 waste time scrolling each line. -- paulus. */
304 if (g_loc_Y >= g_max_loc_Y)
305 g_loc_Y = 0;
306 if (cline) {
307 for (x = 0; x < g_max_loc_X; ++x)
308 draw_byte(' ', x, g_loc_Y);
309 }
310#endif
311}
312
313void btext_drawstring(const char *c)
314{
315 if (!boot_text_mapped)
316 return;
317 while (*c)
318 btext_drawchar(*c++);
319}
320
321void btext_drawhex(unsigned long v)
322{
323 char *hex_table = "0123456789abcdef";
324
325 if (!boot_text_mapped)
326 return;
327 btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
328 btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
329 btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
330 btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
331 btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
332 btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
333 btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
334 btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
335 btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
336 btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
337 btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
338 btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
339 btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
340 btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]);
341 btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]);
342 btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]);
343 btext_drawchar(' ');
344}
345
346static void draw_byte(unsigned char c, long locX, long locY)
347{
348 unsigned char *base = calc_base(locX << 3, locY << 4);
349 unsigned char *font = &vga_font[((unsigned int)c) * 16];
350 int rb = dispDeviceRowBytes;
351
352 switch(dispDeviceDepth) {
353 case 24:
354 case 32:
355 draw_byte_32(font, (unsigned int *)base, rb);
356 break;
357 case 15:
358 case 16:
359 draw_byte_16(font, (unsigned int *)base, rb);
360 break;
361 case 8:
362 draw_byte_8(font, (unsigned int *)base, rb);
363 break;
364 }
365}
366
367static unsigned int expand_bits_8[16] = {
368 0x00000000,
369 0x000000ff,
370 0x0000ff00,
371 0x0000ffff,
372 0x00ff0000,
373 0x00ff00ff,
374 0x00ffff00,
375 0x00ffffff,
376 0xff000000,
377 0xff0000ff,
378 0xff00ff00,
379 0xff00ffff,
380 0xffff0000,
381 0xffff00ff,
382 0xffffff00,
383 0xffffffff
384};
385
386static unsigned int expand_bits_16[4] = {
387 0x00000000,
388 0x0000ffff,
389 0xffff0000,
390 0xffffffff
391};
392
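/* How the tables above are used (example): in 8bpp mode each nibble of a
 * font row indexes expand_bits_8, spreading 4 font bits into 4 bytes of
 * 0xff/0x00 (e.g. bits 1010 -> 0xff00ff00), so one 32-bit store paints
 * 4 pixels; on big-endian ppc the high byte is the leftmost pixel. */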
393
394static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
395{
396 int l, bits;
397 int fg = 0xFFFFFFFFUL;
398 int bg = 0x00000000UL;
399
400 for (l = 0; l < 16; ++l)
401 {
402 bits = *font++;
403 base[0] = (-(bits >> 7) & fg) ^ bg;
404 base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
405 base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
406 base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
407 base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
408 base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
409 base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
410 base[7] = (-(bits & 1) & fg) ^ bg;
411 base = (unsigned int *) ((char *)base + rb);
412 }
413}
414
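/* The -(bit) idiom above relies on two's complement: -1 is all ones and
 * -0 is all zeroes, so (-(bit) & fg) ^ bg yields fg for set font bits
 * and bg for clear ones, without any branches. */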
415static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
416{
417 int l, bits;
418 int fg = 0xFFFFFFFFUL;
419 int bg = 0x00000000UL;
420 unsigned int *eb = (int *)expand_bits_16;
421
422 for (l = 0; l < 16; ++l)
423 {
424 bits = *font++;
425 base[0] = (eb[bits >> 6] & fg) ^ bg;
426 base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
427 base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
428 base[3] = (eb[bits & 3] & fg) ^ bg;
429 base = (unsigned int *) ((char *)base + rb);
430 }
431}
432
433static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
434{
435 int l, bits;
436 int fg = 0x0F0F0F0FUL;
437 int bg = 0x00000000UL;
438 unsigned int *eb = (int *)expand_bits_8;
439
440 for (l = 0; l < 16; ++l)
441 {
442 bits = *font++;
443 base[0] = (eb[bits >> 4] & fg) ^ bg;
444 base[1] = (eb[bits & 0xf] & fg) ^ bg;
445 base = (unsigned int *) ((char *)base + rb);
446 }
447}
448
449static unsigned char vga_font[cmapsz] = {
4500x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4510x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
4520x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
4530xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
4540x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
4550x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
4560x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
4570x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
4580x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
4590x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
4600x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
4610xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
4620x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
4630x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
4640xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
4650x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
4660x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
4670x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
4680x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
4690x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
4700x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
4710x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
4720xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
4730x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
4740x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
4750x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
4760x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
4770xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
4780x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
4790x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4800xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
4810x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
4820x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
4830x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
4840x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4850x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4860x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
4870x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
4880xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4890x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4900x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
4910x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
4920x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4930x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4940x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
4950x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
4960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
4970x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
4980x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
4990x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
5000x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
5010x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
5020x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5030x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
5040x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
5050x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5060x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
5070x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
5080x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5090x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
5100x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
5110x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5120x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5130x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
5140x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
5150x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
5160x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5170x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
5180x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
5190x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
5200x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
5210xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5220x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
5230x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
5240x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5250xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5260x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
5270x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
5280x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5290x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5300x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
5310x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
5320x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
5330x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
5340x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5350x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
5360xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
5370x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
5380x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
5390x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
5400xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
5410x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
5420x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
5430x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
5440x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
5450xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
5460x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
5470x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
5480x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
5490x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
5500x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
5510x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
5520x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
5530xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
5540x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
5550x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
5560xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
5570x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
5580x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
5590x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
5600x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5610xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5620x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
5630x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
5640xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
5650xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
5660x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
5670x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
5680x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
5690xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
5700x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
5710x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
5720x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
5730xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
5740x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
5750x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
5760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5770x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
5780x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5790x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
5800xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
5810x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
5820x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
5830x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
5840xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5850x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5860x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
5870x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
5880xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
5890x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
5900x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
5910x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
5920x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
5930x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
5940x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
5950x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
5960xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5970x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
5980x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
5990x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
6000x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
6010x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
6020x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
6030x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
6040x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
6050x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
6060x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6070x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
6080xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6090x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
6100x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
6110x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
6120xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
6130x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
6140x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
6150x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
6160x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
6170x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
6180x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6190x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
6200xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6210xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
6220x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6230x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
6240xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
6250x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6260x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
6270x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
6280xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
6290x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6300x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
6310x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
6320xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
6330x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6340x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
6350x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
6360x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
6370x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6380x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6390x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
6400xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
6410x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
6420x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
6430x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
6440x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
6450xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
6460x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
6470x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
6480xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
6490x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6500x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6510x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
6520xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
6530x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
6540x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
6550x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6560xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
6570xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
6580x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
6590x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
6600xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
6610x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
6620x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
6630xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
6640xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
6650x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6660x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
6670x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
6680xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
6690x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
6700x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
6710x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
6720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
6730x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6740x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
6750x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
6760xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6770x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
6780x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
6790x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
6800x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
6810x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
6820x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
6830x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
6840x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
6850x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
6860x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
6870x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
6880xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
6890x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
6900x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
6910x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
6920x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
6930x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
6940x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
6950x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
6960x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
6970x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
6980x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
6990x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
7000x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7010x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7020x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
7030x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
7040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7050x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7060x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
7070x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
7080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7090x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7100x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7110x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
7120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
7130x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7140x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7150x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
7160x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7170x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7180x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
7190x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
7200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7210x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7220x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
7230x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
7240x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
7250x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7260x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
7270x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
7280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7290x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7300x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
7310x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
7320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
7330x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7340x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7350x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
7360x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7370x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7380x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
7390x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
7400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7410x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7420xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
7430xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
7440xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
7450xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
7460x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
7470x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
7480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7490x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
7500x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
7510x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
7520xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7530xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
7540x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
7550x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
7560xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7570x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
7580x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7590x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
7600x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
7610x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
7620x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
7630x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
7640x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7650x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7660x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
7670x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
7680x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
7690xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
7700x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
7710x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
7720x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
7730x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
7740x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
7750x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
7760x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7770x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
7780x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
7790x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
7800x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
7810x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7820x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
7830x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7840x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
7850x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
7860x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
7870x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
7880x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7890x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
7900x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7910x00, 0x00, 0x00, 0x00,
792};
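
The hex table above is the tail of the bitmap console font carried by btext.c: each glyph appears to be stored as 16 consecutive bytes, one byte per scan line, with the most significant bit as the leftmost pixel. A minimal sketch of how one such glyph row expands into pixels, assuming that 8x16, MSB-first layout (the sample glyph bytes below are illustrative, not taken from the table):

    #include <stdint.h>
    #include <stdio.h>

    /* Expand one 8-pixel-wide font row: '#' for a set bit (foreground),
     * '.' for a clear bit (background).  MSB-first layout assumed. */
    static void draw_font_row(uint8_t row, char out[9])
    {
        for (int bit = 0; bit < 8; bit++)
            out[bit] = (row & (0x80 >> bit)) ? '#' : '.';
        out[8] = '\0';
    }

    int main(void)
    {
        /* A hypothetical 16-row glyph, one byte per scan line. */
        static const uint8_t glyph[16] = {
            0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
            0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
        };
        char line[9];

        for (int y = 0; y < 16; y++) {
            draw_font_row(glyph[y], line);
            puts(line);
        }
        return 0;
    }
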
diff --git a/arch/ppc64/kernel/dma.c b/arch/ppc64/kernel/dma.c
deleted file mode 100644
index 7c3419656ccc..000000000000
--- a/arch/ppc64/kernel/dma.c
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * Copyright (C) 2004 IBM Corporation
3 *
4 * Implements the generic device DMA API for ppc64. Handles
5 * the PCI and VIO buses.
6 */
7
8#include <linux/device.h>
9#include <linux/dma-mapping.h>
10/* Include the buses we support */
11#include <linux/pci.h>
12#include <asm/vio.h>
13#include <asm/scatterlist.h>
14#include <asm/bug.h>
15
16static struct dma_mapping_ops *get_dma_ops(struct device *dev)
17{
18#ifdef CONFIG_PCI
19 if (dev->bus == &pci_bus_type)
20 return &pci_dma_ops;
21#endif
22#ifdef CONFIG_IBMVIO
23 if (dev->bus == &vio_bus_type)
24 return &vio_dma_ops;
25#endif
26 return NULL;
27}
28
29int dma_supported(struct device *dev, u64 mask)
30{
31 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
32
33 if (dma_ops)
34 return dma_ops->dma_supported(dev, mask);
35 BUG();
36 return 0;
37}
38EXPORT_SYMBOL(dma_supported);
39
40int dma_set_mask(struct device *dev, u64 dma_mask)
41{
42#ifdef CONFIG_PCI
43 if (dev->bus == &pci_bus_type)
44 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
45#endif
46#ifdef CONFIG_IBMVIO
47 if (dev->bus == &vio_bus_type)
48 return -EIO;
49#endif /* CONFIG_IBMVIO */
50 BUG();
51 return 0;
52}
53EXPORT_SYMBOL(dma_set_mask);
54
55void *dma_alloc_coherent(struct device *dev, size_t size,
56 dma_addr_t *dma_handle, gfp_t flag)
57{
58 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
59
60 if (dma_ops)
61 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
62 BUG();
63 return NULL;
64}
65EXPORT_SYMBOL(dma_alloc_coherent);
66
67void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
68 dma_addr_t dma_handle)
69{
70 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
71
72 if (dma_ops)
73 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
74 else
75 BUG();
76}
77EXPORT_SYMBOL(dma_free_coherent);
78
79dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
80 enum dma_data_direction direction)
81{
82 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
83
84 if (dma_ops)
85 return dma_ops->map_single(dev, cpu_addr, size, direction);
86 BUG();
87 return (dma_addr_t)0;
88}
89EXPORT_SYMBOL(dma_map_single);
90
91void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
92 enum dma_data_direction direction)
93{
94 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
95
96 if (dma_ops)
97 dma_ops->unmap_single(dev, dma_addr, size, direction);
98 else
99 BUG();
100}
101EXPORT_SYMBOL(dma_unmap_single);
102
103dma_addr_t dma_map_page(struct device *dev, struct page *page,
104 unsigned long offset, size_t size,
105 enum dma_data_direction direction)
106{
107 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
108
109 if (dma_ops)
110 return dma_ops->map_single(dev,
111 (page_address(page) + offset), size, direction);
112 BUG();
113 return (dma_addr_t)0;
114}
115EXPORT_SYMBOL(dma_map_page);
116
117void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
118 enum dma_data_direction direction)
119{
120 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
121
122 if (dma_ops)
123 dma_ops->unmap_single(dev, dma_address, size, direction);
124 else
125 BUG();
126}
127EXPORT_SYMBOL(dma_unmap_page);
128
129int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
130 enum dma_data_direction direction)
131{
132 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
133
134 if (dma_ops)
135 return dma_ops->map_sg(dev, sg, nents, direction);
136 BUG();
137 return 0;
138}
139EXPORT_SYMBOL(dma_map_sg);
140
141void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
142 enum dma_data_direction direction)
143{
144 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
145
146 if (dma_ops)
147 dma_ops->unmap_sg(dev, sg, nhwentries, direction);
148 else
149 BUG();
150}
151EXPORT_SYMBOL(dma_unmap_sg);
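
dma.c above is a pure dispatcher: each dma_* entry point looks up the struct dma_mapping_ops for the device's bus (PCI or VIO) and forwards the call, with BUG() as the fallback for an unknown bus. A caller-side sketch of that contract, using only the entry points defined above (the device pointer, buffer, and mask value are hypothetical):

    /* Hypothetical driver fragment: the caller never names a bus;
     * dma.c routes the call through dev->bus to pci_dma_ops or
     * vio_dma_ops.  Not a complete driver, just the calling pattern. */
    static int example_map_buffer(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t handle;

        if (!dma_supported(dev, 0xffffffffULL))    /* 32-bit DMA mask */
            return -EIO;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        /* ... hand 'handle' to the device, wait for the transfer ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
    }
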
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
deleted file mode 100644
index 1c869ea72d28..000000000000
--- a/arch/ppc64/kernel/head.S
+++ /dev/null
@@ -1,2007 +0,0 @@
1/*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/config.h>
27#include <linux/threads.h>
28#include <asm/processor.h>
29#include <asm/page.h>
30#include <asm/mmu.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/bug.h>
34#include <asm/cputable.h>
35#include <asm/setup.h>
36#include <asm/hvcall.h>
37#include <asm/iseries/lpar_map.h>
38#include <asm/thread_info.h>
39
40#ifdef CONFIG_PPC_ISERIES
41#define DO_SOFT_DISABLE
42#endif
43
44/*
45 * We layout physical memory as follows:
46 * 0x0000 - 0x00ff : Secondary processor spin code
47 * 0x0100 - 0x2fff : pSeries Interrupt prologs
48 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
49 * 0x6000 - 0x6fff : Initial (CPU0) segment table
50 * 0x7000 - 0x7fff : FWNMI data area
51 * 0x8000 - : Early init and support code
52 */
53
54/*
55 * SPRG Usage
56 *
57 * Register Definition
58 *
59 * SPRG0 reserved for hypervisor
60 * SPRG1 temp - used to save gpr
61 * SPRG2 temp - used to save gpr
62 * SPRG3 virt addr of paca
63 */
64
65/*
66 * Entering into this code we make the following assumptions:
67 * For pSeries:
68 * 1. The MMU is off & Open Firmware is running in real mode.
69 * 2. The kernel is entered at __start
70 *
71 * For iSeries:
72 * 1. The MMU is on (as it always is for iSeries)
73 * 2. The kernel is entered at system_reset_iSeries
74 */
75
76 .text
77 .globl _stext
78_stext:
79#ifdef CONFIG_PPC_MULTIPLATFORM
80_GLOBAL(__start)
81 /* NOP this out unconditionally */
82BEGIN_FTR_SECTION
83 b .__start_initialization_multiplatform
84END_FTR_SECTION(0, 1)
85#endif /* CONFIG_PPC_MULTIPLATFORM */
86
87 /* Catch branch to 0 in real mode */
88 trap
89
90#ifdef CONFIG_PPC_ISERIES
91 /*
92 * At offset 0x20, there is a pointer to iSeries LPAR data.
93 * This is required by the hypervisor
94 */
95 . = 0x20
96 .llong hvReleaseData-KERNELBASE
97
98 /*
99 * At offset 0x28 and 0x30 are offsets to the mschunks_map
100 * array (used by the iSeries LPAR debugger to do translation
101 * between physical addresses and absolute addresses) and
102 * to the pidhash table (also used by the debugger)
103 */
104 .llong mschunks_map-KERNELBASE
105 .llong 0 /* pidhash-KERNELBASE SFRXXX */
106
107 /* Offset 0x38 - Pointer to start of embedded System.map */
108 .globl embedded_sysmap_start
109embedded_sysmap_start:
110 .llong 0
111 /* Offset 0x40 - Pointer to end of embedded System.map */
112 .globl embedded_sysmap_end
113embedded_sysmap_end:
114 .llong 0
115
116#endif /* CONFIG_PPC_ISERIES */
117
118 /* Secondary processors spin on this value until it goes to 1. */
119 .globl __secondary_hold_spinloop
120__secondary_hold_spinloop:
121 .llong 0x0
122
123 /* Secondary processors write this value with their cpu # */
124 /* after they enter the spin loop immediately below. */
125 .globl __secondary_hold_acknowledge
126__secondary_hold_acknowledge:
127 .llong 0x0
128
129 . = 0x60
130/*
131 * The following code is used on pSeries to hold secondary processors
132 * in a spin loop after they have been freed from OpenFirmware, but
133 * before the bulk of the kernel has been relocated. This code
134 * is relocated to physical address 0x60 before prom_init is run.
135 * All of it must fit below the first exception vector at 0x100.
136 */
137_GLOBAL(__secondary_hold)
138 mfmsr r24
139 ori r24,r24,MSR_RI
140 mtmsrd r24 /* RI on */
141
142 /* Grab our linux cpu number */
143 mr r24,r3
144
145 /* Tell the master cpu we're here */
146 /* Relocation is off & we are located at an address less */
147 /* than 0x100, so only need to grab low order offset. */
148 std r24,__secondary_hold_acknowledge@l(0)
149 sync
150
151 /* All secondary cpus wait here until told to start. */
152100: ld r4,__secondary_hold_spinloop@l(0)
153 cmpdi 0,r4,1
154 bne 100b
155
156#ifdef CONFIG_HMT
157 b .hmt_init
158#else
159#ifdef CONFIG_SMP
160 mr r3,r24
161 b .pSeries_secondary_smp_init
162#else
163 BUG_OPCODE
164#endif
165#endif
166
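
The hold loop above is a two-word handshake: each secondary stores its CPU number to __secondary_hold_acknowledge so the master knows it arrived, then spins reading __secondary_hold_spinloop until the master sets it to 1. A rough C model of the same protocol (names follow the assembly; the memory-ordering details of the real code are reduced to volatile accesses):

    #include <stdint.h>

    /* Shared cells living below 0x100, written with relocation off. */
    static volatile uint64_t secondary_hold_spinloop;    /* master sets to 1 */
    static volatile uint64_t secondary_hold_acknowledge; /* secondary's cpu # */

    /* What each held secondary effectively does (cf. __secondary_hold). */
    static void secondary_hold(uint64_t cpu)
    {
        secondary_hold_acknowledge = cpu;  /* tell the master we're here */
        while (secondary_hold_spinloop != 1)
            ;                              /* spin until released */
        /* ... then branch to the secondary SMP init path ... */
    }
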
167/* This value is used to mark exception frames on the stack. */
168 .section ".toc","aw"
169exception_marker:
170 .tc ID_72656773_68657265[TC],0x7265677368657265
171 .text
172
173/*
174 * The following macros define the code that appears as
175 * the prologue to each of the exception handlers. They
176 * are split into two parts to allow a single kernel binary
177 * to be used for pSeries and iSeries.
178 * LOL. One day... - paulus
179 */
180
181/*
182 * We make as much of the exception code common between native
183 * exception handlers (including pSeries LPAR) and iSeries LPAR
184 * implementations as possible.
185 */
186
187/*
188 * This is the start of the interrupt handlers for pSeries
189 * This code runs with relocation off.
190 */
191#define EX_R9 0
192#define EX_R10 8
193#define EX_R11 16
194#define EX_R12 24
195#define EX_R13 32
196#define EX_SRR0 40
197#define EX_DAR 48
198#define EX_DSISR 56
199#define EX_CCR 60
200#define EX_R3 64
201#define EX_LR 72
202
203#define EXCEPTION_PROLOG_PSERIES(area, label) \
204 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
205 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
206 std r10,area+EX_R10(r13); \
207 std r11,area+EX_R11(r13); \
208 std r12,area+EX_R12(r13); \
209 mfspr r9,SPRN_SPRG1; \
210 std r9,area+EX_R13(r13); \
211 mfcr r9; \
212 clrrdi r12,r13,32; /* get high part of &label */ \
213 mfmsr r10; \
214 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
215 ori r12,r12,(label)@l; /* virt addr of handler */ \
216 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
217 mtspr SPRN_SRR0,r12; \
218 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
219 mtspr SPRN_SRR1,r10; \
220 rfid; \
221 b . /* prevent speculative execution */
222
223/*
224 * This is the start of the interrupt handlers for iSeries
225 * This code runs with relocation on.
226 */
227#define EXCEPTION_PROLOG_ISERIES_1(area) \
228 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
229 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
230 std r10,area+EX_R10(r13); \
231 std r11,area+EX_R11(r13); \
232 std r12,area+EX_R12(r13); \
233 mfspr r9,SPRN_SPRG1; \
234 std r9,area+EX_R13(r13); \
235 mfcr r9
236
237#define EXCEPTION_PROLOG_ISERIES_2 \
238 mfmsr r10; \
239 ld r11,PACALPPACA+LPPACASRR0(r13); \
240 ld r12,PACALPPACA+LPPACASRR1(r13); \
241 ori r10,r10,MSR_RI; \
242 mtmsrd r10,1
243
244/*
245 * The common exception prolog is used for all except a few exceptions
246 * such as a segment miss on a kernel address. We have to be prepared
247 * to take another exception from the point where we first touch the
248 * kernel stack onwards.
249 *
250 * On entry r13 points to the paca, r9-r13 are saved in the paca,
251 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
252 * SRR1, and relocation is on.
253 */
254#define EXCEPTION_PROLOG_COMMON(n, area) \
255 andi. r10,r12,MSR_PR; /* See if coming from user */ \
256 mr r10,r1; /* Save r1 */ \
257 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
258 beq- 1f; \
259 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2601: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
261 bge- cr1,bad_stack; /* abort if it is */ \
262 std r9,_CCR(r1); /* save CR in stackframe */ \
263 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
264 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
265 std r10,0(r1); /* make stack chain pointer */ \
266 std r0,GPR0(r1); /* save r0 in stackframe */ \
267 std r10,GPR1(r1); /* save r1 in stackframe */ \
268 std r2,GPR2(r1); /* save r2 in stackframe */ \
269 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
270 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
271 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
272 ld r10,area+EX_R10(r13); \
273 std r9,GPR9(r1); \
274 std r10,GPR10(r1); \
275 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
276 ld r10,area+EX_R12(r13); \
277 ld r11,area+EX_R13(r13); \
278 std r9,GPR11(r1); \
279 std r10,GPR12(r1); \
280 std r11,GPR13(r1); \
281 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
282 mflr r9; /* save LR in stackframe */ \
283 std r9,_LINK(r1); \
284 mfctr r10; /* save CTR in stackframe */ \
285 std r10,_CTR(r1); \
286 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
287 std r11,_XER(r1); \
288 li r9,(n)+1; \
289 std r9,_TRAP(r1); /* set trap number */ \
290 li r10,0; \
291 ld r11,exception_marker@toc(r2); \
292 std r10,RESULT(r1); /* clear regs->result */ \
293 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
294
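
EXCEPTION_PROLOG_COMMON is the frame builder: it selects the kernel stack, saves the volatile GPRs plus CR, LR, CTR, and XER along with the SRR0/SRR1 pair, records the trap number, and plants the exception_marker doubleword so frame walkers can recognize an exception frame. Roughly, the frame it lays down has this shape (a sketch only; the authoritative offsets such as _CCR and _TRAP come from asm-offsets.c):

    /* Sketch of the exception stack frame built above; field order is
     * illustrative, the real layout is struct pt_regs plus the
     * asm-offsets constants (_CCR, _NIP, _MSR, _TRAP, ...). */
    struct exc_frame_sketch {
        unsigned long back_chain;  /* old r1, stored at 0(r1) */
        unsigned long gpr[32];     /* r0-r13 here; r14-r31 via save_nvgprs */
        unsigned long nip;         /* SRR0: address to resume at */
        unsigned long msr;         /* SRR1: MSR of the interrupted context */
        unsigned long ctr, link, xer, ccr;
        unsigned long trap;        /* vector number + 1 */
        unsigned long result;      /* cleared to 0 by the prolog */
        /* exception_marker is stored at STACK_FRAME_OVERHEAD-16(r1)
         * to tag the frame */
    };
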
295/*
296 * Exception vectors.
297 */
298#define STD_EXCEPTION_PSERIES(n, label) \
299 . = n; \
300 .globl label##_pSeries; \
301label##_pSeries: \
302 HMT_MEDIUM; \
303 mtspr SPRN_SPRG1,r13; /* save r13 */ \
304 RUNLATCH_ON(r13); \
305 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
306
307#define STD_EXCEPTION_ISERIES(n, label, area) \
308 .globl label##_iSeries; \
309label##_iSeries: \
310 HMT_MEDIUM; \
311 mtspr SPRN_SPRG1,r13; /* save r13 */ \
312 RUNLATCH_ON(r13); \
313 EXCEPTION_PROLOG_ISERIES_1(area); \
314 EXCEPTION_PROLOG_ISERIES_2; \
315 b label##_common
316
317#define MASKABLE_EXCEPTION_ISERIES(n, label) \
318 .globl label##_iSeries; \
319label##_iSeries: \
320 HMT_MEDIUM; \
321 mtspr SPRN_SPRG1,r13; /* save r13 */ \
322 RUNLATCH_ON(r13); \
323 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
324 lbz r10,PACAPROCENABLED(r13); \
325 cmpwi 0,r10,0; \
326 beq- label##_iSeries_masked; \
327 EXCEPTION_PROLOG_ISERIES_2; \
328 b label##_common; \
329
330#ifdef DO_SOFT_DISABLE
331#define DISABLE_INTS \
332 lbz r10,PACAPROCENABLED(r13); \
333 li r11,0; \
334 std r10,SOFTE(r1); \
335 mfmsr r10; \
336 stb r11,PACAPROCENABLED(r13); \
337 ori r10,r10,MSR_EE; \
338 mtmsrd r10,1
339
340#define ENABLE_INTS \
341 lbz r10,PACAPROCENABLED(r13); \
342 mfmsr r11; \
343 std r10,SOFTE(r1); \
344 ori r11,r11,MSR_EE; \
345 mtmsrd r11,1
346
347#else /* hard enable/disable interrupts */
348#define DISABLE_INTS
349
350#define ENABLE_INTS \
351 ld r12,_MSR(r1); \
352 mfmsr r11; \
353 rlwimi r11,r12,0,MSR_EE; \
354 mtmsrd r11,1
355
356#endif
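
Under DO_SOFT_DISABLE (iSeries), DISABLE_INTS does not turn external interrupts off in the MSR; it records the old soft-enable state in the frame's SOFTE slot, marks the processor soft-disabled in the paca, and then hard-enables MSR_EE so interrupts can still be taken and deferred. A hedged C rendering of the two macros, with illustrative names standing in for the paca field and frame slot:

    /* Soft-disable sketch (DO_SOFT_DISABLE case).  Field and function
     * names are illustrative stand-ins, not kernel API. */
    struct paca_sketch { unsigned char proc_enabled; };

    static void hard_enable_ee(void) { /* ori MSR_EE; mtmsrd */ }

    static void disable_ints(struct paca_sketch *paca, long *softe_slot)
    {
        *softe_slot = paca->proc_enabled;  /* save old soft state (SOFTE) */
        paca->proc_enabled = 0;            /* soft-disable */
        hard_enable_ee();                  /* interrupts deferred, not lost */
    }

    static void enable_ints(struct paca_sketch *paca, long *softe_slot)
    {
        *softe_slot = paca->proc_enabled;  /* record state for the frame */
        hard_enable_ee();                  /* hard-enable external ints */
    }
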
357
358#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
359 .align 7; \
360 .globl label##_common; \
361label##_common: \
362 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
363 DISABLE_INTS; \
364 bl .save_nvgprs; \
365 addi r3,r1,STACK_FRAME_OVERHEAD; \
366 bl hdlr; \
367 b .ret_from_except
368
369#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
370 .align 7; \
371 .globl label##_common; \
372label##_common: \
373 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
374 DISABLE_INTS; \
375 addi r3,r1,STACK_FRAME_OVERHEAD; \
376 bl hdlr; \
377 b .ret_from_except_lite
378
379/*
380 * Start of pSeries system interrupt routines
381 */
382 . = 0x100
383 .globl __start_interrupts
384__start_interrupts:
385
386 STD_EXCEPTION_PSERIES(0x100, system_reset)
387
388 . = 0x200
389_machine_check_pSeries:
390 HMT_MEDIUM
391 mtspr SPRN_SPRG1,r13 /* save r13 */
392 RUNLATCH_ON(r13)
393 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
394
395 . = 0x300
396 .globl data_access_pSeries
397data_access_pSeries:
398 HMT_MEDIUM
399 mtspr SPRN_SPRG1,r13
400BEGIN_FTR_SECTION
401 mtspr SPRN_SPRG2,r12
402 mfspr r13,SPRN_DAR
403 mfspr r12,SPRN_DSISR
404 srdi r13,r13,60
405 rlwimi r13,r12,16,0x20
406 mfcr r12
407 cmpwi r13,0x2c
408 beq .do_stab_bolted_pSeries
409 mtcrf 0x80,r12
410 mfspr r12,SPRN_SPRG2
411END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
412 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
413
414 . = 0x380
415 .globl data_access_slb_pSeries
416data_access_slb_pSeries:
417 HMT_MEDIUM
418 mtspr SPRN_SPRG1,r13
419 RUNLATCH_ON(r13)
420 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
421 std r3,PACA_EXSLB+EX_R3(r13)
422 mfspr r3,SPRN_DAR
423 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
424 mfcr r9
425#ifdef __DISABLED__
426 /* Keep that around for when we re-implement dynamic VSIDs */
427 cmpdi r3,0
428 bge slb_miss_user_pseries
429#endif /* __DISABLED__ */
430 std r10,PACA_EXSLB+EX_R10(r13)
431 std r11,PACA_EXSLB+EX_R11(r13)
432 std r12,PACA_EXSLB+EX_R12(r13)
433 mfspr r10,SPRN_SPRG1
434 std r10,PACA_EXSLB+EX_R13(r13)
435 mfspr r12,SPRN_SRR1 /* and SRR1 */
436 b .slb_miss_realmode /* Rel. branch works in real mode */
437
438 STD_EXCEPTION_PSERIES(0x400, instruction_access)
439
440 . = 0x480
441 .globl instruction_access_slb_pSeries
442instruction_access_slb_pSeries:
443 HMT_MEDIUM
444 mtspr SPRN_SPRG1,r13
445 RUNLATCH_ON(r13)
446 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
447 std r3,PACA_EXSLB+EX_R3(r13)
448 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
449 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
450 mfcr r9
451#ifdef __DISABLED__
452 /* Keep that around for when we re-implement dynamic VSIDs */
453 cmpdi r3,0
454 bge slb_miss_user_pseries
455#endif /* __DISABLED__ */
456 std r10,PACA_EXSLB+EX_R10(r13)
457 std r11,PACA_EXSLB+EX_R11(r13)
458 std r12,PACA_EXSLB+EX_R12(r13)
459 mfspr r10,SPRN_SPRG1
460 std r10,PACA_EXSLB+EX_R13(r13)
461 mfspr r12,SPRN_SRR1 /* and SRR1 */
462 b .slb_miss_realmode /* Rel. branch works in real mode */
463
464 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
465 STD_EXCEPTION_PSERIES(0x600, alignment)
466 STD_EXCEPTION_PSERIES(0x700, program_check)
467 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
468 STD_EXCEPTION_PSERIES(0x900, decrementer)
469 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
470 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
471
472 . = 0xc00
473 .globl system_call_pSeries
474system_call_pSeries:
475 HMT_MEDIUM
476 RUNLATCH_ON(r9)
477 mr r9,r13
478 mfmsr r10
479 mfspr r13,SPRN_SPRG3
480 mfspr r11,SPRN_SRR0
481 clrrdi r12,r13,32
482 oris r12,r12,system_call_common@h
483 ori r12,r12,system_call_common@l
484 mtspr SPRN_SRR0,r12
485 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
486 mfspr r12,SPRN_SRR1
487 mtspr SPRN_SRR1,r10
488 rfid
489 b . /* prevent speculative execution */
490
491 STD_EXCEPTION_PSERIES(0xd00, single_step)
492 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
493
494 /* We need to deal with the AltiVec unavailable exception
495 * here, which is at 0xf20 and thus in the middle of the
496 * prolog code of the performance monitor one. A little
497 * trickery is therefore necessary
498 */
499 . = 0xf00
500 b performance_monitor_pSeries
501
502 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
503
504 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
505 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
506
507 . = 0x3000
508
509/*** pSeries interrupt support ***/
510
511 /* moved from 0xf00 */
512 STD_EXCEPTION_PSERIES(., performance_monitor)
513
514 .align 7
515_GLOBAL(do_stab_bolted_pSeries)
516 mtcrf 0x80,r12
517 mfspr r12,SPRN_SPRG2
518 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
519
520/*
521 * We have some room here, which we use to put
522 * the pSeries SLB miss user trampoline code so it is reasonably
523 * far away from slb_miss_user_common, to avoid problems with rfid.
524 *
525 * This is used when the SLB miss handler has to go virtual,
526 * which doesn't happen for now but will once we re-implement
527 * dynamic VSIDs for shared page tables
528 */
529#ifdef __DISABLED__
530slb_miss_user_pseries:
531 std r10,PACA_EXGEN+EX_R10(r13)
532 std r11,PACA_EXGEN+EX_R11(r13)
533 std r12,PACA_EXGEN+EX_R12(r13)
534 mfspr r10,SPRG1
535 ld r11,PACA_EXSLB+EX_R9(r13)
536 ld r12,PACA_EXSLB+EX_R3(r13)
537 std r10,PACA_EXGEN+EX_R13(r13)
538 std r11,PACA_EXGEN+EX_R9(r13)
539 std r12,PACA_EXGEN+EX_R3(r13)
540 clrrdi r12,r13,32
541 mfmsr r10
542 mfspr r11,SRR0 /* save SRR0 */
543 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
544 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
545 mtspr SRR0,r12
546 mfspr r12,SRR1 /* and SRR1 */
547 mtspr SRR1,r10
548 rfid
549 b . /* prevent spec. execution */
550#endif /* __DISABLED__ */
551
552/*
553 * Vectors for the FWNMI option. Share common code.
554 */
555 .globl system_reset_fwnmi
556system_reset_fwnmi:
557 HMT_MEDIUM
558 mtspr SPRN_SPRG1,r13 /* save r13 */
559 RUNLATCH_ON(r13)
560 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
561
562 .globl machine_check_fwnmi
563machine_check_fwnmi:
564 HMT_MEDIUM
565 mtspr SPRN_SPRG1,r13 /* save r13 */
566 RUNLATCH_ON(r13)
567 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
568
569#ifdef CONFIG_PPC_ISERIES
570/*** ISeries-LPAR interrupt handlers ***/
571
572 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
573
574 .globl data_access_iSeries
575data_access_iSeries:
576 mtspr SPRN_SPRG1,r13
577BEGIN_FTR_SECTION
578 mtspr SPRN_SPRG2,r12
579 mfspr r13,SPRN_DAR
580 mfspr r12,SPRN_DSISR
581 srdi r13,r13,60
582 rlwimi r13,r12,16,0x20
583 mfcr r12
584 cmpwi r13,0x2c
585 beq .do_stab_bolted_iSeries
586 mtcrf 0x80,r12
587 mfspr r12,SPRN_SPRG2
588END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
589 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
590 EXCEPTION_PROLOG_ISERIES_2
591 b data_access_common
592
593.do_stab_bolted_iSeries:
594 mtcrf 0x80,r12
595 mfspr r12,SPRN_SPRG2
596 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
597 EXCEPTION_PROLOG_ISERIES_2
598 b .do_stab_bolted
599
600 .globl data_access_slb_iSeries
601data_access_slb_iSeries:
602 mtspr SPRN_SPRG1,r13 /* save r13 */
603 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
604 std r3,PACA_EXSLB+EX_R3(r13)
605 mfspr r3,SPRN_DAR
606 std r9,PACA_EXSLB+EX_R9(r13)
607 mfcr r9
608#ifdef __DISABLED__
609 cmpdi r3,0
610 bge slb_miss_user_iseries
611#endif
612 std r10,PACA_EXSLB+EX_R10(r13)
613 std r11,PACA_EXSLB+EX_R11(r13)
614 std r12,PACA_EXSLB+EX_R12(r13)
615 mfspr r10,SPRN_SPRG1
616 std r10,PACA_EXSLB+EX_R13(r13)
617 ld r12,PACALPPACA+LPPACASRR1(r13);
618 b .slb_miss_realmode
619
620 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
621
622 .globl instruction_access_slb_iSeries
623instruction_access_slb_iSeries:
624 mtspr SPRN_SPRG1,r13 /* save r13 */
625 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
626 std r3,PACA_EXSLB+EX_R3(r13)
627 ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
628 std r9,PACA_EXSLB+EX_R9(r13)
629 mfcr r9
630#ifdef __DISABLED__
631 cmpdi r3,0
632 bge .slb_miss_user_iseries
633#endif
634 std r10,PACA_EXSLB+EX_R10(r13)
635 std r11,PACA_EXSLB+EX_R11(r13)
636 std r12,PACA_EXSLB+EX_R12(r13)
637 mfspr r10,SPRN_SPRG1
638 std r10,PACA_EXSLB+EX_R13(r13)
639 ld r12,PACALPPACA+LPPACASRR1(r13);
640 b .slb_miss_realmode
641
642#ifdef __DISABLED__
643slb_miss_user_iseries:
644 std r10,PACA_EXGEN+EX_R10(r13)
645 std r11,PACA_EXGEN+EX_R11(r13)
646 std r12,PACA_EXGEN+EX_R12(r13)
647 mfspr r10,SPRG1
648 ld r11,PACA_EXSLB+EX_R9(r13)
649 ld r12,PACA_EXSLB+EX_R3(r13)
650 std r10,PACA_EXGEN+EX_R13(r13)
651 std r11,PACA_EXGEN+EX_R9(r13)
652 std r12,PACA_EXGEN+EX_R3(r13)
653 EXCEPTION_PROLOG_ISERIES_2
654 b slb_miss_user_common
655#endif
656
657 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
658 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
659 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
660 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
661 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
662 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
663 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
664
665 .globl system_call_iSeries
666system_call_iSeries:
667 mr r9,r13
668 mfspr r13,SPRN_SPRG3
669 EXCEPTION_PROLOG_ISERIES_2
670 b system_call_common
671
672 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
673 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
674 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
675
676 .globl system_reset_iSeries
677system_reset_iSeries:
678 mfspr r13,SPRN_SPRG3 /* Get paca address */
679 mfmsr r24
680 ori r24,r24,MSR_RI
681 mtmsrd r24 /* RI on */
682 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
683 cmpwi 0,r24,0 /* Are we processor 0? */
684 beq .__start_initialization_iSeries /* Start up the first processor */
685 mfspr r4,SPRN_CTRLF
686 li r5,CTRL_RUNLATCH /* Turn off the run light */
687 andc r4,r4,r5
688 mtspr SPRN_CTRLT,r4
689
6901:
691 HMT_LOW
692#ifdef CONFIG_SMP
693 lbz r23,PACAPROCSTART(r13) /* Test if this processor
694 * should start */
695 sync
696 LOADADDR(r3,current_set)
697 sldi r28,r24,3 /* get current_set[cpu#] */
698 ldx r3,r3,r28
699 addi r1,r3,THREAD_SIZE
700 subi r1,r1,STACK_FRAME_OVERHEAD
701
702 cmpwi 0,r23,0
703 beq iSeries_secondary_smp_loop /* Loop until told to go */
704 bne .__secondary_start /* We have been told to go */
705iSeries_secondary_smp_loop:
706 /* Let the Hypervisor know we are alive */
707 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
708 lis r3,0x8002
709 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
710#else /* CONFIG_SMP */
711 /* Yield the processor. This is required for non-SMP kernels
712 which are running on multi-threaded machines. */
713 lis r3,0x8000
714 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
715 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
716 li r4,0 /* "yield timed" */
717 li r5,-1 /* "yield forever" */
718#endif /* CONFIG_SMP */
719 li r0,-1 /* r0=-1 indicates a Hypervisor call */
720 sc /* Invoke the hypervisor via a system call */
721 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
722 b 1b /* If SMP not configured, secondaries
723 * loop forever */
724
725 .globl decrementer_iSeries_masked
726decrementer_iSeries_masked:
727 li r11,1
728 stb r11,PACALPPACA+LPPACADECRINT(r13)
729 lwz r12,PACADEFAULTDECR(r13)
730 mtspr SPRN_DEC,r12
731 /* fall through */
732
733 .globl hardware_interrupt_iSeries_masked
734hardware_interrupt_iSeries_masked:
735 mtcrf 0x80,r9 /* Restore regs */
736 ld r11,PACALPPACA+LPPACASRR0(r13)
737 ld r12,PACALPPACA+LPPACASRR1(r13)
738 mtspr SPRN_SRR0,r11
739 mtspr SPRN_SRR1,r12
740 ld r9,PACA_EXGEN+EX_R9(r13)
741 ld r10,PACA_EXGEN+EX_R10(r13)
742 ld r11,PACA_EXGEN+EX_R11(r13)
743 ld r12,PACA_EXGEN+EX_R12(r13)
744 ld r13,PACA_EXGEN+EX_R13(r13)
745 rfid
746 b . /* prevent speculative execution */
747#endif /* CONFIG_PPC_ISERIES */
748
749/*** Common interrupt handlers ***/
750
751 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
752
753 /*
754 * Machine check is different because we use a different
755 * save area: PACA_EXMC instead of PACA_EXGEN.
756 */
757 .align 7
758 .globl machine_check_common
759machine_check_common:
760 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
761 DISABLE_INTS
762 bl .save_nvgprs
763 addi r3,r1,STACK_FRAME_OVERHEAD
764 bl .machine_check_exception
765 b .ret_from_except
766
767 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
768 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
769 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
770 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
771 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
772 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
773 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
774#ifdef CONFIG_ALTIVEC
775 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
776#else
777 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
778#endif
779
780/*
781 * Here we have detected that the kernel stack pointer is bad.
782 * R9 contains the saved CR, r13 points to the paca,
783 * r10 contains the (bad) kernel stack pointer,
784 * r11 and r12 contain the saved SRR0 and SRR1.
785 * We switch to using an emergency stack, save the registers there,
786 * and call kernel_bad_stack(), which panics.
787 */
788bad_stack:
789 ld r1,PACAEMERGSP(r13)
790 subi r1,r1,64+INT_FRAME_SIZE
791 std r9,_CCR(r1)
792 std r10,GPR1(r1)
793 std r11,_NIP(r1)
794 std r12,_MSR(r1)
795 mfspr r11,SPRN_DAR
796 mfspr r12,SPRN_DSISR
797 std r11,_DAR(r1)
798 std r12,_DSISR(r1)
799 mflr r10
800 mfctr r11
801 mfxer r12
802 std r10,_LINK(r1)
803 std r11,_CTR(r1)
804 std r12,_XER(r1)
805 SAVE_GPR(0,r1)
806 SAVE_GPR(2,r1)
807 SAVE_4GPRS(3,r1)
808 SAVE_2GPRS(7,r1)
809 SAVE_10GPRS(12,r1)
810 SAVE_10GPRS(22,r1)
811 addi r11,r1,INT_FRAME_SIZE
812 std r11,0(r1)
813 li r12,0
814 std r12,0(r11)
815 ld r2,PACATOC(r13)
8161: addi r3,r1,STACK_FRAME_OVERHEAD
817 bl .kernel_bad_stack
818 b 1b
819
820/*
821 * Return from an exception with minimal checks.
822 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
823 * If interrupts have been enabled, or anything has been
824 * done that might have changed the scheduling status of
825 * any task or sent any task a signal, you should use
826 * ret_from_except or ret_from_except_lite instead of this.
827 */
828 .globl fast_exception_return
829fast_exception_return:
830 ld r12,_MSR(r1)
831 ld r11,_NIP(r1)
832 andi. r3,r12,MSR_RI /* check if RI is set */
833 beq- unrecov_fer
834 ld r3,_CCR(r1)
835 ld r4,_LINK(r1)
836 ld r5,_CTR(r1)
837 ld r6,_XER(r1)
838 mtcr r3
839 mtlr r4
840 mtctr r5
841 mtxer r6
842 REST_GPR(0, r1)
843 REST_8GPRS(2, r1)
844
845 mfmsr r10
846 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
847 mtmsrd r10,1
848
849 mtspr SPRN_SRR1,r12
850 mtspr SPRN_SRR0,r11
851 REST_4GPRS(10, r1)
852 ld r1,GPR1(r1)
853 rfid
854 b . /* prevent speculative execution */
855
856unrecov_fer:
857 bl .save_nvgprs
8581: addi r3,r1,STACK_FRAME_OVERHEAD
859 bl .unrecoverable_exception
860 b 1b
861
862/*
863 * Here r13 points to the paca, r9 contains the saved CR,
864 * SRR0 and SRR1 are saved in r11 and r12,
865 * r9 - r13 are saved in paca->exgen.
866 */
867 .align 7
868 .globl data_access_common
869data_access_common:
870 RUNLATCH_ON(r10) /* It won't fit in the 0x300 handler */
871 mfspr r10,SPRN_DAR
872 std r10,PACA_EXGEN+EX_DAR(r13)
873 mfspr r10,SPRN_DSISR
874 stw r10,PACA_EXGEN+EX_DSISR(r13)
875 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
876 ld r3,PACA_EXGEN+EX_DAR(r13)
877 lwz r4,PACA_EXGEN+EX_DSISR(r13)
878 li r5,0x300
879 b .do_hash_page /* Try to handle as hpte fault */
880
881 .align 7
882 .globl instruction_access_common
883instruction_access_common:
884 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
885 ld r3,_NIP(r1)
886 andis. r4,r12,0x5820
887 li r5,0x400
888 b .do_hash_page /* Try to handle as hpte fault */
889
890/*
891 * Here is the common SLB miss user handler, used when the SLB miss
892 * goes to virtual mode; it is currently not used
893 */
894#ifdef __DISABLED__
895 .align 7
896 .globl slb_miss_user_common
897slb_miss_user_common:
898 mflr r10
899 std r3,PACA_EXGEN+EX_DAR(r13)
900 stw r9,PACA_EXGEN+EX_CCR(r13)
901 std r10,PACA_EXGEN+EX_LR(r13)
902 std r11,PACA_EXGEN+EX_SRR0(r13)
903 bl .slb_allocate_user
904
905 ld r10,PACA_EXGEN+EX_LR(r13)
906 ld r3,PACA_EXGEN+EX_R3(r13)
907 lwz r9,PACA_EXGEN+EX_CCR(r13)
908 ld r11,PACA_EXGEN+EX_SRR0(r13)
909 mtlr r10
910 beq- slb_miss_fault
911
912 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
913 beq- unrecov_user_slb
914 mfmsr r10
915
916.machine push
917.machine "power4"
918 mtcrf 0x80,r9
919.machine pop
920
921 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
922 mtmsrd r10,1
923
924 mtspr SRR0,r11
925 mtspr SRR1,r12
926
927 ld r9,PACA_EXGEN+EX_R9(r13)
928 ld r10,PACA_EXGEN+EX_R10(r13)
929 ld r11,PACA_EXGEN+EX_R11(r13)
930 ld r12,PACA_EXGEN+EX_R12(r13)
931 ld r13,PACA_EXGEN+EX_R13(r13)
932 rfid
933 b .
934
935slb_miss_fault:
936 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
937 ld r4,PACA_EXGEN+EX_DAR(r13)
938 li r5,0
939 std r4,_DAR(r1)
940 std r5,_DSISR(r1)
941 b .handle_page_fault
942
943unrecov_user_slb:
944 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
945 DISABLE_INTS
946 bl .save_nvgprs
9471: addi r3,r1,STACK_FRAME_OVERHEAD
948 bl .unrecoverable_exception
949 b 1b
950
951#endif /* __DISABLED__ */
952
953
954/*
955 * r13 points to the PACA, r9 contains the saved CR,
956 * r12 contains the saved SRR1, SRR0 is still ready for return
957 * r3 has the faulting address
958 * r9 - r13 are saved in paca->exslb.
959 * r3 is saved in paca->slb_r3
960 * We assume we aren't going to take any exceptions during this procedure.
961 */
962_GLOBAL(slb_miss_realmode)
963 mflr r10
964
965 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
966 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
967
968 bl .slb_allocate_realmode
969
970 /* All done -- return from exception. */
971
972 ld r10,PACA_EXSLB+EX_LR(r13)
973 ld r3,PACA_EXSLB+EX_R3(r13)
974 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
975#ifdef CONFIG_PPC_ISERIES
976 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
977#endif /* CONFIG_PPC_ISERIES */
978
979 mtlr r10
980
981 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
982 beq- unrecov_slb
983
984.machine push
985.machine "power4"
986 mtcrf 0x80,r9
987 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
988.machine pop
989
990#ifdef CONFIG_PPC_ISERIES
991 mtspr SPRN_SRR0,r11
992 mtspr SPRN_SRR1,r12
993#endif /* CONFIG_PPC_ISERIES */
994 ld r9,PACA_EXSLB+EX_R9(r13)
995 ld r10,PACA_EXSLB+EX_R10(r13)
996 ld r11,PACA_EXSLB+EX_R11(r13)
997 ld r12,PACA_EXSLB+EX_R12(r13)
998 ld r13,PACA_EXSLB+EX_R13(r13)
999 rfid
1000 b . /* prevent speculative execution */
1001
1002unrecov_slb:
1003 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1004 DISABLE_INTS
1005 bl .save_nvgprs
10061: addi r3,r1,STACK_FRAME_OVERHEAD
1007 bl .unrecoverable_exception
1008 b 1b
1009
1010 .align 7
1011 .globl hardware_interrupt_common
1012 .globl hardware_interrupt_entry
1013hardware_interrupt_common:
1014 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
1015hardware_interrupt_entry:
1016 DISABLE_INTS
1017 addi r3,r1,STACK_FRAME_OVERHEAD
1018 bl .do_IRQ
1019 b .ret_from_except_lite
1020
1021 .align 7
1022 .globl alignment_common
1023alignment_common:
1024 mfspr r10,SPRN_DAR
1025 std r10,PACA_EXGEN+EX_DAR(r13)
1026 mfspr r10,SPRN_DSISR
1027 stw r10,PACA_EXGEN+EX_DSISR(r13)
1028 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1029 ld r3,PACA_EXGEN+EX_DAR(r13)
1030 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1031 std r3,_DAR(r1)
1032 std r4,_DSISR(r1)
1033 bl .save_nvgprs
1034 addi r3,r1,STACK_FRAME_OVERHEAD
1035 ENABLE_INTS
1036 bl .alignment_exception
1037 b .ret_from_except
1038
1039 .align 7
1040 .globl program_check_common
1041program_check_common:
1042 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1043 bl .save_nvgprs
1044 addi r3,r1,STACK_FRAME_OVERHEAD
1045 ENABLE_INTS
1046 bl .program_check_exception
1047 b .ret_from_except
1048
1049 .align 7
1050 .globl fp_unavailable_common
1051fp_unavailable_common:
1052 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1053 bne .load_up_fpu /* if from user, just load it up */
1054 bl .save_nvgprs
1055 addi r3,r1,STACK_FRAME_OVERHEAD
1056 ENABLE_INTS
1057 bl .kernel_fp_unavailable_exception
1058 BUG_OPCODE
1059
1060 .align 7
1061 .globl altivec_unavailable_common
1062altivec_unavailable_common:
1063 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1064#ifdef CONFIG_ALTIVEC
1065BEGIN_FTR_SECTION
1066 bne .load_up_altivec /* if from user, just load it up */
1067END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1068#endif
1069 bl .save_nvgprs
1070 addi r3,r1,STACK_FRAME_OVERHEAD
1071 ENABLE_INTS
1072 bl .altivec_unavailable_exception
1073 b .ret_from_except
1074
1075#ifdef CONFIG_ALTIVEC
1076/*
1077 * load_up_altivec(unused, unused, tsk)
1078 * Disable VMX for the task which had it previously,
1079 * and save its vector registers in its thread_struct.
1080 * Enables VMX for use in the kernel on return.
1081 * On SMP we know the VMX is free, since we give it up every
1082 * switch (i.e., no lazy save of the vector registers).
1083 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1084 */
1085_STATIC(load_up_altivec)
1086 mfmsr r5 /* grab the current MSR */
1087 oris r5,r5,MSR_VEC@h
1088 mtmsrd r5 /* enable use of VMX now */
1089 isync
1090
1091/*
1092 * For SMP, we don't do lazy VMX switching because it just gets too
1093 * horrendously complex, especially when a task switches from one CPU
1094 * to another. Instead we call giveup_altivec in switch_to.
1095 * VRSAVE isn't dealt with here; that is done in the normal context
1096 * switch code. Note that we could rely on the VRSAVE value to eventually
1097 * avoid saving all of the VREGs here...
1098 */
1099#ifndef CONFIG_SMP
1100 ld r3,last_task_used_altivec@got(r2)
1101 ld r4,0(r3)
1102 cmpdi 0,r4,0
1103 beq 1f
1104 /* Save VMX state to last_task_used_altivec's THREAD struct */
1105 addi r4,r4,THREAD
1106 SAVE_32VRS(0,r5,r4)
1107 mfvscr vr0
1108 li r10,THREAD_VSCR
1109 stvx vr0,r10,r4
1110 /* Disable VMX for last_task_used_altivec */
1111 ld r5,PT_REGS(r4)
1112 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1113 lis r6,MSR_VEC@h
1114 andc r4,r4,r6
1115 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
11161:
1117#endif /* CONFIG_SMP */
1118 /* Hack: if we get an altivec unavailable trap with VRSAVE
1119 * set to all zeros, we assume this is a broken application
1120 * that fails to set it properly, and thus we switch it to
1121 * all 1's
1122 */
1123 mfspr r4,SPRN_VRSAVE
1124 cmpdi 0,r4,0
1125 bne+ 1f
1126 li r4,-1
1127 mtspr SPRN_VRSAVE,r4
11281:
1129 /* enable use of VMX after return */
1130 ld r4,PACACURRENT(r13)
1131 addi r5,r4,THREAD /* Get THREAD */
1132 oris r12,r12,MSR_VEC@h
1133 std r12,_MSR(r1)
1134 li r4,1
1135 li r10,THREAD_VSCR
1136 stw r4,THREAD_USED_VR(r5)
1137 lvx vr0,r10,r5
1138 mtvscr vr0
1139 REST_32VRS(0,r4,r5)
1140#ifndef CONFIG_SMP
1141 /* Update last_task_used_altivec to 'current' */
1142 subi r4,r5,THREAD /* Back to 'current' */
1143 std r4,0(r3)
1144#endif /* CONFIG_SMP */
1145 /* restore registers and return */
1146 b fast_exception_return
1147#endif /* CONFIG_ALTIVEC */
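
On non-SMP kernels, load_up_altivec is the classic lazy-switch pattern: if some other task still owns the vector unit, its vector registers are saved into its thread_struct and MSR_VEC is cleared in that task's saved MSR (so it faults and reloads on its next vector instruction); then current's state is loaded and ownership recorded. A compact C model of the ownership dance (the types and save/restore helpers are illustrative, not the real thread_struct layout):

    /* Lazy vector-unit ownership, UP case (cf. load_up_altivec). */
    struct task_sketch {
        unsigned long saved_msr;     /* MSR image in the task's pt_regs */
        unsigned long vr[32][2];     /* stand-in vector save area */
    };

    static struct task_sketch *last_task_used_altivec;

    static void lazy_load_up(struct task_sketch *current_task,
                             unsigned long msr_vec)
    {
        struct task_sketch *old = last_task_used_altivec;

        if (old && old != current_task) {
            /* save_vregs(old->vr);  -- SAVE_32VRS in the assembly */
            old->saved_msr &= ~msr_vec;  /* old task faults on next use */
        }
        /* restore_vregs(current_task->vr);  -- REST_32VRS */
        current_task->saved_msr |= msr_vec; /* no fault on return */
        last_task_used_altivec = current_task;
    }
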
1148
1149/*
1150 * Hash table stuff
1151 */
1152 .align 7
1153_GLOBAL(do_hash_page)
1154 std r3,_DAR(r1)
1155 std r4,_DSISR(r1)
1156
1157 andis. r0,r4,0xa450 /* weird error? */
1158 bne- .handle_page_fault /* if so, take the fault path; else insert a HPTE */
1159BEGIN_FTR_SECTION
1160 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1161 bne- .do_ste_alloc /* If so handle it */
1162END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1163
1164 /*
1165 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1166 * accessing a userspace segment (even from the kernel). We assume
1167 * kernel addresses always have the high bit set.
1168 */
1169 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1170 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1171 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1172 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1173 ori r4,r4,1 /* add _PAGE_PRESENT */
1174 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
1175
1176 /*
1177 * On iSeries, we soft-disable interrupts here, then
1178 * hard-enable interrupts so that the hash_page code can spin on
1179 * the hash_table_lock without problems on a shared processor.
1180 */
1181 DISABLE_INTS
1182
1183 /*
1184 * r3 contains the faulting address
1185 * r4 contains the required access permissions
1186 * r5 contains the trap number
1187 *
1188 * at return r3 = 0 for success
1189 */
1190 bl .hash_page /* build HPTE if possible */
1191 cmpdi r3,0 /* see if hash_page succeeded */
1192
1193#ifdef DO_SOFT_DISABLE
1194 /*
1195 * If we had interrupts soft-enabled at the point where the
1196 * DSI/ISI occurred, and an interrupt came in during hash_page,
1197 * handle it now.
1198 * We jump to ret_from_except_lite rather than fast_exception_return
1199 * because ret_from_except_lite will check for and handle pending
1200 * interrupts if necessary.
1201 */
1202 beq .ret_from_except_lite
1203 /* For a hash failure, we don't bother re-enabling interrupts */
1204 ble- 12f
1205
1206 /*
1207 * hash_page couldn't handle it, set soft interrupt enable back
1208 * to what it was before the trap. Note that .local_irq_restore
1209 * handles any interrupts pending at this point.
1210 */
1211 ld r3,SOFTE(r1)
1212 bl .local_irq_restore
1213 b 11f
1214#else
1215 beq fast_exception_return /* Return from exception on success */
1216 ble- 12f /* Failure return from hash_page */
1217
1218 /* fall through */
1219#endif
1220
1221/* Here we have a page fault that hash_page can't handle. */
1222_GLOBAL(handle_page_fault)
1223 ENABLE_INTS
122411: ld r4,_DAR(r1)
1225 ld r5,_DSISR(r1)
1226 addi r3,r1,STACK_FRAME_OVERHEAD
1227 bl .do_page_fault
1228 cmpdi r3,0
1229 beq+ .ret_from_except_lite
1230 bl .save_nvgprs
1231 mr r5,r3
1232 addi r3,r1,STACK_FRAME_OVERHEAD
1233 lwz r4,_DAR(r1)
1234 bl .bad_page_fault
1235 b .ret_from_except
1236
1237/* We have a page fault that hash_page could handle but HV refused
1238 * the PTE insertion
1239 */
124012: bl .save_nvgprs
1241 addi r3,r1,STACK_FRAME_OVERHEAD
1242 lwz r4,_DAR(r1)
1243 bl .low_hash_fault
1244 b .ret_from_except
1245
1246 /* here we have a segment miss */
1247_GLOBAL(do_ste_alloc)
1248 bl .ste_allocate /* try to insert stab entry */
1249 cmpdi r3,0
1250 beq+ fast_exception_return
1251 b .handle_page_fault
1252
1253/*
1254 * r13 points to the PACA, r9 contains the saved CR,
1255 * r11 and r12 contain the saved SRR0 and SRR1.
1256 * r9 - r13 are saved in paca->exslb.
1257 * We assume we aren't going to take any exceptions during this procedure.
1258 * We assume (DAR >> 60) == 0xc.
1259 */
1260 .align 7
1261_GLOBAL(do_stab_bolted)
1262 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1263 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1264
1265 /* Hash to the primary group */
1266 ld r10,PACASTABVIRT(r13)
1267 mfspr r11,SPRN_DAR
1268 srdi r11,r11,28
1269 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1270
1271 /* Calculate VSID */
1272 /* This is a kernel address, so protovsid = ESID */
1273 ASM_VSID_SCRAMBLE(r11, r9)
1274 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1275
1276 /* Search the primary group for a free entry */
12771: ld r11,0(r10) /* Test valid bit of the current ste */
1278 andi. r11,r11,0x80
1279 beq 2f
1280 addi r10,r10,16
1281 andi. r11,r10,0x70
1282 bne 1b
1283
1284 /* Stick to searching only the primary group for now. */
1285 /* At least for now, we use a very simple random castout scheme */
1286 /* Use the TB as a random number; OR in 0x10 to avoid entry 0 */
1287 mftb r11
1288 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1289 ori r11,r11,0x10
1290
1291 /* r10 currently points to an ste one past the group of interest */
1292 /* make it point to the randomly selected entry */
1293 subi r10,r10,128
1294 or r10,r10,r11 /* r10 is the entry to invalidate */
1295
1296 isync /* mark the entry invalid */
1297 ld r11,0(r10)
1298 rldicl r11,r11,56,1 /* clear the valid bit */
1299 rotldi r11,r11,8
1300 std r11,0(r10)
1301 sync
1302
1303 clrrdi r11,r11,28 /* Get the esid part of the ste */
1304 slbie r11
1305
13062: std r9,8(r10) /* Store the vsid part of the ste */
1307 eieio
1308
1309 mfspr r11,SPRN_DAR /* Get the new esid */
1310 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1311 ori r11,r11,0x90 /* Turn on valid and kp */
1312 std r11,0(r10) /* Put new entry back into the stab */
1313
1314 sync
1315
1316 /* All done -- return from exception. */
1317 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1318 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1319
1320 andi. r10,r12,MSR_RI
1321 beq- unrecov_slb
1322
1323 mtcrf 0x80,r9 /* restore CR */
1324
1325 mfmsr r10
1326 clrrdi r10,r10,2
1327 mtmsrd r10,1
1328
1329 mtspr SPRN_SRR0,r11
1330 mtspr SPRN_SRR1,r12
1331 ld r9,PACA_EXSLB+EX_R9(r13)
1332 ld r10,PACA_EXSLB+EX_R10(r13)
1333 ld r11,PACA_EXSLB+EX_R11(r13)
1334 ld r12,PACA_EXSLB+EX_R12(r13)
1335 ld r13,PACA_EXSLB+EX_R13(r13)
1336 rfid
1337 b . /* prevent speculative execution */
1338
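
do_stab_bolted hashes the faulting ESID to a group of eight 16-byte segment table entries, scans that primary group for an entry whose valid bit is clear, and, if the group is full, casts out a pseudo-random victim chosen from the timebase, never slot 0. The slot selection, modeled in C (the entry layout is reduced to the valid bit; the VSID scramble and the slbie invalidation are elided):

    #include <stdint.h>

    #define STES_PER_GROUP 8

    struct ste_sketch { uint64_t esid_v; uint64_t vsid; };  /* 16 bytes */

    /* Pick a slot in the primary group: the first invalid entry wins,
     * otherwise a timebase-seeded castout that can never be slot 0
     * (mirroring the "ori r11,r11,0x10" above). */
    static unsigned pick_ste_slot(const struct ste_sketch *group,
                                  uint64_t timebase)
    {
        for (unsigned i = 0; i < STES_PER_GROUP; i++)
            if (!(group[i].esid_v & 0x80))      /* valid bit clear? */
                return i;
        return (unsigned)((timebase & (STES_PER_GROUP - 1)) | 1);
    }
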
1339/*
1340 * Space for CPU0's segment table.
1341 *
1342 * On iSeries, the hypervisor must fill in at least one entry before
1343 * we get control (with relocate on). The address is given to the hv
1344 * as a page number (see xLparMap in lpardata.c), so this must be at a
1345 * fixed address (the linker can't compute (u64)&initial_stab >>
1346 * PAGE_SHIFT).
1347 */
1348 . = STAB0_PHYS_ADDR /* 0x6000 */
1349 .globl initial_stab
1350initial_stab:
1351 .space 4096
1352
1353/*
1354 * Data area reserved for FWNMI option.
1355 * This address (0x7000) is fixed by the RPA.
1356 */
1357 . = 0x7000
1358 .globl fwnmi_data_area
1359fwnmi_data_area:
1360
1361 /* iSeries does not use the FWNMI stuff, so it is safe to put
1362 * this here, even if we later allow kernels that will boot on
1363 * both pSeries and iSeries */
1364#ifdef CONFIG_PPC_ISERIES
1365 . = LPARMAP_PHYS
1366#include "lparmap.s"
1367/*
1368 * This ".text" is here for old compilers that generate a trailing
1369 * .note section when compiling .c files to .s
1370 */
1371 .text
1372#endif /* CONFIG_PPC_ISERIES */
1373
1374 . = 0x8000
1375
1376/*
1377 * On pSeries, secondary processors spin in the following code.
1378 * At entry, r3 = this processor's number (physical cpu id)
1379 */
1380_GLOBAL(pSeries_secondary_smp_init)
1381 mr r24,r3
1382
1383 /* turn on 64-bit mode */
1384 bl .enable_64b_mode
1385 isync
1386
1387 /* Copy some CPU settings from CPU 0 */
1388 bl .__restore_cpu_setup
1389
1390 /* Set up a paca value for this processor. Since we have the
1391 * physical cpu id in r24, we need to search the pacas to find
1392 * which logical id maps to our physical one.
1393 */
1394 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1395 li r5,0 /* logical cpu id */
13961: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1397 cmpw r6,r24 /* Compare to our id */
1398 beq 2f
1399 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1400 addi r5,r5,1
1401 cmpwi r5,NR_CPUS
1402 blt 1b
1403
1404 mr r3,r24 /* not found, copy phys to r3 */
1405 b .kexec_wait /* next kernel might do better */
1406
14072: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1408 /* From now on, r24 is expected to be logical cpuid */
1409 mr r24,r5
14103: HMT_LOW
1411 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1412 /* start. */
1413 sync
1414
1415 /* Create a temp kernel stack for use before relocation is on. */
1416 ld r1,PACAEMERGSP(r13)
1417 subi r1,r1,STACK_FRAME_OVERHEAD
1418
1419 cmpwi 0,r23,0
1420#ifdef CONFIG_SMP
1421 bne .__secondary_start
1422#endif
1423 b 3b /* Loop until told to go */
1424
1425#ifdef CONFIG_PPC_ISERIES
1426_STATIC(__start_initialization_iSeries)
1427 /* Clear out the BSS */
1428 LOADADDR(r11,__bss_stop)
1429 LOADADDR(r8,__bss_start)
1430 sub r11,r11,r8 /* bss size */
1431 addi r11,r11,7 /* round up to an even double word */
1432 rldicl. r11,r11,61,3 /* shift right by 3 */
1433 beq 4f
1434 addi r8,r8,-8
1435 li r0,0
1436 mtctr r11 /* zero this many doublewords */
14373: stdu r0,8(r8)
1438 bdnz 3b
14394:
1440 LOADADDR(r1,init_thread_union)
1441 addi r1,r1,THREAD_SIZE
1442 li r0,0
1443 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1444
1445 LOADADDR(r3,cpu_specs)
1446 LOADADDR(r4,cur_cpu_spec)
1447 li r5,0
1448 bl .identify_cpu
1449
1450 LOADADDR(r2,__toc_start)
1451 addi r2,r2,0x4000
1452 addi r2,r2,0x4000
1453
1454 bl .iSeries_early_setup
1455 bl .early_setup
1456
1457 /* relocation is on at this point */
1458
1459 b .start_here_common
1460#endif /* CONFIG_PPC_ISERIES */
1461
1462#ifdef CONFIG_PPC_MULTIPLATFORM
1463
1464_STATIC(__mmu_off)
1465 mfmsr r3
1466 andi. r0,r3,MSR_IR|MSR_DR
1467 beqlr
1468 andc r3,r3,r0
1469 mtspr SPRN_SRR0,r4
1470 mtspr SPRN_SRR1,r3
1471 sync
1472 rfid
1473 b . /* prevent speculative execution */
1474
1475
1476/*
1477 * Here is our main kernel entry point. We currently support two kinds of entries
1478 * depending on the value of r5.
1479 *
1480 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1481 * in r3...r7
1482 *
1483 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1484 * DT block, r4 is a physical pointer to the kernel itself
1485 *
1486 */
1487_GLOBAL(__start_initialization_multiplatform)
1488 /*
1489 * Are we booted from a PROM OF-type client interface?
1490 */
1491 cmpldi cr0,r5,0
1492 bne .__boot_from_prom /* yes -> prom */
1493
1494 /* Save parameters */
1495 mr r31,r3
1496 mr r30,r4
1497
1498 /* Make sure we are running in 64-bit mode */
1499 bl .enable_64b_mode
1500
1501 /* Setup some critical 970 SPRs before switching MMU off */
1502 bl .__970_cpu_preinit
1503
1504 /* cpu # */
1505 li r24,0
1506
1507 /* Switch off MMU if not already */
1508 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1509 add r4,r4,r30
1510 bl .__mmu_off
1511 b .__after_prom_start
1512
1513_STATIC(__boot_from_prom)
1514 /* Save parameters */
1515 mr r31,r3
1516 mr r30,r4
1517 mr r29,r5
1518 mr r28,r6
1519 mr r27,r7
1520
1521 /* Make sure we are running in 64-bit mode */
1522 bl .enable_64b_mode
1523
1524 /* put a relocation offset into r3 */
1525 bl .reloc_offset
1526
1527 LOADADDR(r2,__toc_start)
1528 addi r2,r2,0x4000
1529 addi r2,r2,0x4000
1530
1531 /* Relocate the TOC from a virt addr to a real addr */
1532 sub r2,r2,r3
1533
1534 /* Restore parameters */
1535 mr r3,r31
1536 mr r4,r30
1537 mr r5,r29
1538 mr r6,r28
1539 mr r7,r27
1540
1541 /* Do all of the interaction with OF client interface */
1542 bl .prom_init
1543 /* We never return */
1544 trap
1545
1546/*
1547 * At this point, r3 contains the physical address we are running at,
1548 * returned by prom_init()
1549 */
1550_STATIC(__after_prom_start)
1551
1552/*
1553 * We need to run with __start at physical address 0.
1554 * This will leave some code in the first 256B of
1555 * real memory, which is reserved for software use.
1556 * The remainder of the first page is loaded with the fixed
1557 * interrupt vectors. The next two pages are filled with
1558 * unknown exception placeholders.
1559 *
1560 * Note: This process overwrites the OF exception vectors.
1561 * r26 == relocation offset
1562 * r27 == KERNELBASE
1563 */
1564 bl .reloc_offset
1565 mr r26,r3
1566 SET_REG_TO_CONST(r27,KERNELBASE)
1567
1568 li r3,0 /* target addr */
1569
1570 // XXX FIXME: Use phys returned by OF (r30)
1571 sub r4,r27,r26 /* source addr */
1572 /* current address of _start */
1573 /* i.e. where we are running */
1574 /* the source addr */
1575
1576 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1577 sub r5,r5,r27
1578
1579 li r6,0x100 /* Start offset, the first 0x100 */
1580 /* bytes were copied earlier. */
1581
1582 bl .copy_and_flush /* copy the first n bytes */
1583 /* this includes the code being */
1584 /* executed here. */
1585
1586 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1587 mtctr r0 /* that we just made/relocated */
1588 bctr
1589
15904: LOADADDR(r5,klimit)
1591 sub r5,r5,r26
1592 ld r5,0(r5) /* get the value of klimit */
1593 sub r5,r5,r27
1594 bl .copy_and_flush /* copy the rest */
1595 b .start_here_multiplatform
1596
1597#endif /* CONFIG_PPC_MULTIPLATFORM */
1598
1599/*
1600 * Copy routine used to copy the kernel to start at physical address 0
1601 * and flush and invalidate the caches as needed.
1602 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1603 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1604 *
1605 * Note: this routine *only* clobbers r0, r6 and lr
1606 */
1607_GLOBAL(copy_and_flush)
1608 addi r5,r5,-8
1609 addi r6,r6,-8
16104: li r0,16 /* Use the least common */
1611 /* denominator cache line */
1612 /* size. This results in */
1613 /* extra cache line flushes */
1614 /* but operation is correct. */
1615 /* Can't get cache line size */
1616 /* from NACA as it is being */
1617 /* moved too. */
1618
1619 mtctr r0 /* put # words/line in ctr */
16203: addi r6,r6,8 /* copy a cache line */
1621 ldx r0,r6,r4
1622 stdx r0,r6,r3
1623 bdnz 3b
1624 dcbst r6,r3 /* write it to memory */
1625 sync
1626 icbi r6,r3 /* flush the icache line */
1627 cmpld 0,r6,r5
1628 blt 4b
1629 sync
1630 addi r5,r5,8
1631 addi r6,r6,8
1632 blr
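
A rough C model of copy_and_flush, following the register convention stated
in the comment below (r3 dest, r4 source, r5 copy limit, r6 start offset);
this is only a sketch, since the dcbst/sync/icbi cache maintenance has no
portable C equivalent:

	/* Copy 16 doublewords (one assumed 128-byte line) at a time, then
	 * flush that line to memory and invalidate its icache copy, until
	 * the offset reaches the limit. */
	unsigned long copy_and_flush_model(char *dest, const char *src,
					   unsigned long limit,
					   unsigned long offset)
	{
		while (offset < limit) {
			int i;

			for (i = 0; i < 16; i++, offset += 8)
				*(unsigned long *)(dest + offset) =
					*(const unsigned long *)(src + offset);
			/* dcbst dest+offset; sync; icbi dest+offset */
		}
		return offset;		/* like r6 on exit: >= limit */
	}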
1633
1634.align 8
1635copy_to_here:
1636
1637#ifdef CONFIG_SMP
1638#ifdef CONFIG_PPC_PMAC
1639/*
1640 * On PowerMac, secondary processors start from the reset vector, which
1641 * is temporarily turned into a call to one of the functions below.
1642 */
1643 .section ".text";
1644 .align 2 ;
1645
1646 .globl __secondary_start_pmac_0
1647__secondary_start_pmac_0:
1648 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1649 li r24,0
1650 b 1f
1651 li r24,1
1652 b 1f
1653 li r24,2
1654 b 1f
1655 li r24,3
16561:
1657
1658_GLOBAL(pmac_secondary_start)
1659 /* turn on 64-bit mode */
1660 bl .enable_64b_mode
1661 isync
1662
1663 /* Copy some CPU settings from CPU 0 */
1664 bl .__restore_cpu_setup
1665
1666 /* pSeries does this early, though I don't think we really need it */
1667 mfmsr r3
1668 ori r3,r3,MSR_RI
1669 mtmsrd r3 /* RI on */
1670
1671 /* Set up a paca value for this processor. */
1672 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1673 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1674 add r13,r13,r4 /* for this processor. */
1675 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1676
1677 /* Create a temp kernel stack for use before relocation is on. */
1678 ld r1,PACAEMERGSP(r13)
1679 subi r1,r1,STACK_FRAME_OVERHEAD
1680
1681 b .__secondary_start
1682
1683#endif /* CONFIG_PPC_PMAC */
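
In C terms, the three instructions that compute r13 above are plain array
indexing into the global paca array; a pseudo-C sketch (cpu stands for the
value in r24):

	struct paca_struct *lpaca = &paca[cpu];		/* LOADADDR/mulli/add */
	mtspr(SPRN_SPRG3, (unsigned long)lpaca);	/* mirror it in SPRG3 */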
1684
1685/*
1686 * This function is called after the master CPU has released the
1687 * secondary processors. The execution environment is relocation off.
1688 * The paca for this processor has the following fields initialized at
1689 * this point:
1690 * 1. Processor number
1691 * 2. Segment table pointer (virtual address)
1692 * On entry the following are set:
1693 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1694 * r24 = cpu# (in Linux terms)
1695 * r13 = paca virtual address
1696 * SPRG3 = paca virtual address
1697 */
1698_GLOBAL(__secondary_start)
1699
1700 HMT_MEDIUM /* Set thread priority to MEDIUM */
1701
1702 ld r2,PACATOC(r13)
1703
1704 /* Do early setup for that CPU */
1705 bl .early_setup_secondary
1706
1707 /* Initialize the kernel stack. Just a repeat for iSeries. */
1708 LOADADDR(r3,current_set)
1709 sldi r28,r24,3 /* get current_set[cpu#] */
1710 ldx r1,r3,r28
1711 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1712 std r1,PACAKSAVE(r13)
1713
1714 li r7,0
1715 mtlr r7
1716
1717 /* enable MMU and jump to start_secondary */
1718 LOADADDR(r3,.start_secondary_prolog)
1719 SET_REG_TO_CONST(r4, MSR_KERNEL)
1720#ifdef DO_SOFT_DISABLE
1721 ori r4,r4,MSR_EE
1722#endif
1723 mtspr SPRN_SRR0,r3
1724 mtspr SPRN_SRR1,r4
1725 rfid
1726 b . /* prevent speculative execution */
1727
1728/*
1729 * Running with relocation on at this point. All we want to do is
1730 * zero the stack back-chain pointer before going into C code.
1731 */
1732_GLOBAL(start_secondary_prolog)
1733 li r3,0
1734 std r3,0(r1) /* Zero the stack frame pointer */
1735 bl .start_secondary
1736#endif
1737
1738/*
1739 * This subroutine clobbers r11 and r12
1740 */
1741_GLOBAL(enable_64b_mode)
1742 mfmsr r11 /* grab the current MSR */
1743 li r12,1
1744 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1745 or r11,r11,r12
1746 li r12,1
1747 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1748 or r11,r11,r12
1749 mtmsrd r11
1750 isync
1751 blr
1752
1753#ifdef CONFIG_PPC_MULTIPLATFORM
1754/*
1755 * This is where the main kernel code starts.
1756 */
1757_STATIC(start_here_multiplatform)
1758 /* get a new offset, now that the kernel has moved. */
1759 bl .reloc_offset
1760 mr r26,r3
1761
1762 /* Clear out the BSS. It may have been done in prom_init already,
1763 * but that's irrelevant since prom_init will soon
1764 * be detached from the kernel completely. Besides, we need
1765 * to clear it now for kexec-style entry.
1766 */
1767 LOADADDR(r11,__bss_stop)
1768 LOADADDR(r8,__bss_start)
1769 sub r11,r11,r8 /* bss size */
1770 addi r11,r11,7 /* round up to a doubleword multiple */
1771 rldicl. r11,r11,61,3 /* shift right by 3 */
1772 beq 4f
1773 addi r8,r8,-8
1774 li r0,0
1775 mtctr r11 /* zero this many doublewords */
17763: stdu r0,8(r8)
1777 bdnz 3b
17784:
1779
1780 mfmsr r6
1781 ori r6,r6,MSR_RI
1782 mtmsrd r6 /* RI on */
1783
1784#ifdef CONFIG_HMT
1785 /* Start up the second thread on cpu 0 */
1786 mfspr r3,SPRN_PVR
1787 srwi r3,r3,16
1788 cmpwi r3,0x34 /* Pulsar */
1789 beq 90f
1790 cmpwi r3,0x36 /* Icestar */
1791 beq 90f
1792 cmpwi r3,0x37 /* SStar */
1793 beq 90f
1794 b 91f /* HMT not supported */
179590: li r3,0
1796 bl .hmt_start_secondary
179791:
1798#endif
1799
1800 /* The following gets the stack and TOC set up with the regs */
1801 /* pointing to the real addr of the kernel stack. This is */
1802 /* all done to support the C function call below which sets */
1803 /* up the htab. This is done because we have relocated the */
1804 /* kernel but are still running in real mode. */
1805
1806 LOADADDR(r3,init_thread_union)
1807 sub r3,r3,r26
1808
1809 /* set up a stack pointer (physical address) */
1810 addi r1,r3,THREAD_SIZE
1811 li r0,0
1812 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1813
1814 /* set up the TOC (physical address) */
1815 LOADADDR(r2,__toc_start)
1816 addi r2,r2,0x4000
1817 addi r2,r2,0x4000
1818 sub r2,r2,r26
1819
1820 LOADADDR(r3,cpu_specs)
1821 sub r3,r3,r26
1822 LOADADDR(r4,cur_cpu_spec)
1823 sub r4,r4,r26
1824 mr r5,r26
1825 bl .identify_cpu
1826
1827 /* Save some low level config HIDs of CPU0 to be copied to
1828 * other CPUs later on, or used for suspend/resume
1829 */
1830 bl .__save_cpu_setup
1831 sync
1832
1833 /* Set up a valid physical PACA pointer in SPRG3 for early_setup.
1834 * Note that boot_cpuid can always be 0 nowadays, since there is
1835 * nowhere it can be initialized differently before we reach this
1836 * code.
1837 */
1838 LOADADDR(r27, boot_cpuid)
1839 sub r27,r27,r26
1840 lwz r27,0(r27)
1841
1842 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1843 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1844 add r13,r13,r24 /* for this processor. */
1845 sub r13,r13,r26 /* convert to physical addr */
1846 mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
1847
1848 /* Do very early kernel initializations, including initial hash table,
1849 * stab and slb setup before we turn on relocation. */
1850
1851 /* Restore parameters passed from prom_init/kexec */
1852 mr r3,r31
1853 bl .early_setup
1854
1855 LOADADDR(r3,.start_here_common)
1856 SET_REG_TO_CONST(r4, MSR_KERNEL)
1857 mtspr SPRN_SRR0,r3
1858 mtspr SPRN_SRR1,r4
1859 rfid
1860 b . /* prevent speculative execution */
1861#endif /* CONFIG_PPC_MULTIPLATFORM */
1862
1863 /* This is where all platforms converge execution */
1864_STATIC(start_here_common)
1865 /* relocation is on at this point */
1866
1867 /* The following code sets up the SP and TOC now that we are */
1868 /* running with translation enabled. */
1869
1870 LOADADDR(r3,init_thread_union)
1871
1872 /* set up the stack */
1873 addi r1,r3,THREAD_SIZE
1874 li r0,0
1875 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1876
1877 /* Apply the CPU-specific fixups (nop out sections not relevant
1878 * to this CPU).
1879 */
1880 li r3,0
1881 bl .do_cpu_ftr_fixups
1882
1883 LOADADDR(r26, boot_cpuid)
1884 lwz r26,0(r26)
1885
1886 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1887 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1888 add r13,r13,r24 /* for this processor. */
1889 mtspr SPRN_SPRG3,r13
1890
1891 /* ptr to current */
1892 LOADADDR(r4,init_task)
1893 std r4,PACACURRENT(r13)
1894
1895 /* Load the TOC */
1896 ld r2,PACATOC(r13)
1897 std r1,PACAKSAVE(r13)
1898
1899 bl .setup_system
1900
1901 /* Load up the kernel context */
19025:
1903#ifdef DO_SOFT_DISABLE
1904 li r5,0
1905 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
1906 mfmsr r5
1907 ori r5,r5,MSR_EE /* Hard Enabled */
1908 mtmsrd r5
1909#endif
1910
1911 bl .start_kernel
1912
1913_GLOBAL(hmt_init)
1914#ifdef CONFIG_HMT
1915 LOADADDR(r5, hmt_thread_data)
1916 mfspr r7,SPRN_PVR
1917 srwi r7,r7,16
1918 cmpwi r7,0x34 /* Pulsar */
1919 beq 90f
1920 cmpwi r7,0x36 /* Icestar */
1921 beq 91f
1922 cmpwi r7,0x37 /* SStar */
1923 beq 91f
1924 b 101f
192590: mfspr r6,SPRN_PIR
1926 andi. r6,r6,0x1f
1927 b 92f
192891: mfspr r6,SPRN_PIR
1929 andi. r6,r6,0x3ff
193092: sldi r4,r24,3
1931 stwx r6,r5,r4
1932 bl .hmt_start_secondary
1933 b 101f
1934
1935__hmt_secondary_hold:
1936 LOADADDR(r5, hmt_thread_data)
1937 clrldi r5,r5,4
1938 li r7,0
1939 mfspr r6,SPRN_PIR
1940 mfspr r8,SPRN_PVR
1941 srwi r8,r8,16
1942 cmpwi r8,0x34
1943 bne 93f
1944 andi. r6,r6,0x1f
1945 b 103f
194693: andi. r6,r6,0x3f
1947
1948103: lwzx r8,r5,r7
1949 cmpw r8,r6
1950 beq 104f
1951 addi r7,r7,8
1952 b 103b
1953
1954104: addi r7,r7,4
1955 lwzx r9,r5,r7
1956 mr r24,r9
1957101:
1958#endif
1959 mr r3,r24
1960 b .pSeries_secondary_smp_init
1961
1962#ifdef CONFIG_HMT
1963_GLOBAL(hmt_start_secondary)
1964 LOADADDR(r4,__hmt_secondary_hold)
1965 clrldi r4,r4,4
1966 mtspr SPRN_NIADORM, r4
1967 mfspr r4, SPRN_MSRDORM
1968 li r5, -65
1969 and r4, r4, r5
1970 mtspr SPRN_MSRDORM, r4
1971 lis r4,0xffef
1972 ori r4,r4,0x7403
1973 mtspr SPRN_TSC, r4
1974 li r4,0x1f4
1975 mtspr SPRN_TST, r4
1976 mfspr r4, SPRN_HID0
1977 ori r4, r4, 0x1
1978 mtspr SPRN_HID0, r4
1979 mfspr r4, SPRN_CTRLF
1980 oris r4, r4, 0x40
1981 mtspr SPRN_CTRLT, r4
1982 blr
1983#endif
1984
1985/*
1986 * We put a few things here that have to be page-aligned.
1987 * This stuff goes at the beginning of the bss, which is page-aligned.
1988 */
1989 .section ".bss"
1990
1991 .align PAGE_SHIFT
1992
1993 .globl empty_zero_page
1994empty_zero_page:
1995 .space PAGE_SIZE
1996
1997 .globl swapper_pg_dir
1998swapper_pg_dir:
1999 .space PAGE_SIZE
2000
2001/*
2002 * This space gets a copy of optional info passed to us by the bootstrap.
2003 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
2004 */
2005 .globl cmd_line
2006cmd_line:
2007 .space COMMAND_LINE_SIZE
diff --git a/arch/ppc64/kernel/hvconsole.c b/arch/ppc64/kernel/hvconsole.c
deleted file mode 100644
index 138e128a3886..000000000000
--- a/arch/ppc64/kernel/hvconsole.c
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * hvconsole.c
3 * Copyright (C) 2004 Hollis Blanchard, IBM Corporation
4 * Copyright (C) 2004 IBM Corporation
5 *
6 * Additional Author(s):
7 * Ryan S. Arnold <rsa@us.ibm.com>
8 *
9 * LPAR console support.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <asm/hvcall.h>
29#include <asm/hvconsole.h>
30
31/**
32 * hvc_get_chars - retrieve characters from firmware for denoted vterm adapter
33 * @vtermno: The vtermno or unit_address of the adapter from which to fetch the
34 * data.
35 * @buf: The character buffer into which to put the character data fetched from
36 * firmware.
37 * @count: not used?
38 */
39int hvc_get_chars(uint32_t vtermno, char *buf, int count)
40{
41 unsigned long got;
42
43 if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got,
44 (unsigned long *)buf, (unsigned long *)buf+1) == H_Success)
45 return got;
46 return 0;
47}
48
49EXPORT_SYMBOL(hvc_get_chars);
50
51
52/**
53 * hvc_put_chars - send characters to firmware for denoted vterm adapter
54 * @vtermno: The vtermno or unit_address of the adapter from which the data
55 * originated.
56 * @buf: The character buffer that contains the character data to send to
57 * firmware.
58 * @count: Send this number of characters.
59 */
60int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
61{
62 unsigned long *lbuf = (unsigned long *) buf;
63 long ret;
64
65 ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
66 lbuf[1]);
67 if (ret == H_Success)
68 return count;
69 if (ret == H_Busy)
70 return 0;
71 return -EIO;
72}
73
74EXPORT_SYMBOL(hvc_put_chars);
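
A minimal sketch of how a console driver might sit on top of the two exported
calls above; my_vtermno and the echo behaviour are assumptions for
illustration, not part of this file:

	char buf[16];	/* H_GET_TERM_CHAR returns at most 16 bytes */
	int n;

	n = hvc_get_chars(my_vtermno, buf, sizeof(buf));
	if (n > 0)
		hvc_put_chars(my_vtermno, buf, n);	/* echo it back */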
diff --git a/arch/ppc64/kernel/hvcserver.c b/arch/ppc64/kernel/hvcserver.c
deleted file mode 100644
index 4d584172055a..000000000000
--- a/arch/ppc64/kernel/hvcserver.c
+++ /dev/null
@@ -1,251 +0,0 @@
1/*
2 * hvcserver.c
3 * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
4 *
5 * PPC64 virtual I/O console server support.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/kernel.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/slab.h>
26
27#include <asm/hvcall.h>
28#include <asm/hvcserver.h>
29#include <asm/io.h>
30
31#define HVCS_ARCH_VERSION "1.0.0"
32
33MODULE_AUTHOR("Ryan S. Arnold <rsa@us.ibm.com>");
34MODULE_DESCRIPTION("IBM hvcs ppc64 API");
35MODULE_LICENSE("GPL");
36MODULE_VERSION(HVCS_ARCH_VERSION);
37
38/*
39 * Convert arch specific return codes into relevant errnos. The hvcs
40 * functions aren't performance sensitive, so this conversion isn't an
41 * issue.
42 */
43int hvcs_convert(long to_convert)
44{
45 switch (to_convert) {
46 case H_Success:
47 return 0;
48 case H_Parameter:
49 return -EINVAL;
50 case H_Hardware:
51 return -EIO;
52 case H_Busy:
53 case H_LongBusyOrder1msec:
54 case H_LongBusyOrder10msec:
55 case H_LongBusyOrder100msec:
56 case H_LongBusyOrder1sec:
57 case H_LongBusyOrder10sec:
58 case H_LongBusyOrder100sec:
59 return -EBUSY;
60 case H_Function: /* fall through */
61 default:
62 return -EPERM;
63 }
64}
65
66/**
67 * hvcs_free_partner_info - free pi allocated by hvcs_get_partner_info
68 * @head: list_head pointer for an allocated list of partner info structs to
69 * free.
70 *
71 * This function is used to free the partner info list that was returned by
72 * calling hvcs_get_partner_info().
73 */
74int hvcs_free_partner_info(struct list_head *head)
75{
76 struct hvcs_partner_info *pi;
77 struct list_head *element;
78
79 if (!head)
80 return -EINVAL;
81
82 while (!list_empty(head)) {
83 element = head->next;
84 pi = list_entry(element, struct hvcs_partner_info, node);
85 list_del(element);
86 kfree(pi);
87 }
88
89 return 0;
90}
91EXPORT_SYMBOL(hvcs_free_partner_info);
92
93/* Helper function for hvcs_get_partner_info */
94int hvcs_next_partner(uint32_t unit_address,
95 unsigned long last_p_partition_ID,
96 unsigned long last_p_unit_address, unsigned long *pi_buff)
97
98{
99 long retval;
100 retval = plpar_hcall_norets(H_VTERM_PARTNER_INFO, unit_address,
101 last_p_partition_ID,
102 last_p_unit_address, virt_to_phys(pi_buff));
103 return hvcs_convert(retval);
104}
105
106/**
107 * hvcs_get_partner_info - Get all of the partner info for a vty-server adapter
108 * @unit_address: The unit_address of the vty-server adapter for which this
109 * function is fetching partner info.
110 * @head: An initialized list_head pointer to an empty list to use to return the
111 * list of partner info fetched from the hypervisor to the caller.
112 * @pi_buff: A page-sized buffer pre-allocated prior to calling this function,
113 * to be used by firmware as an iterator to keep track
114 * of the partner info retrieval.
115 *
116 * This function returns zero on success (even if there is no partner info), or a negative errno on failure.
117 *
118 * The pi_buff is pre-allocated prior to calling this function because this
119 * function may be called with a spin_lock held, and a GFP_ATOMIC kmalloc
120 * of a whole page is not recommended.
121 *
122 * The first long of this buffer is used to store a partner unit address. The
123 * second long is used to store a partner partition ID and starting at
124 * pi_buff[2] is the 79-character Converged Location Code (a different size than the
125 * unsigned longs, hence the casting mumbo jumbo you see later).
126 *
127 * Invocation of this function should always be followed by an invocation of
128 * hvcs_free_partner_info() using a pointer to the SAME list head instance
129 * that was passed as a parameter to this function.
130 */
131int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
132 unsigned long *pi_buff)
133{
134 /*
135 * Dealt with as longs because of the hcall interface even though the
136 * values are uint32_t.
137 */
138 unsigned long last_p_partition_ID;
139 unsigned long last_p_unit_address;
140 struct hvcs_partner_info *next_partner_info = NULL;
141 int more = 1;
142 int retval;
143
144 /* invalid parameters: check before touching pi_buff */
145 if (!head || !pi_buff)
146 return -EINVAL;
147 memset(pi_buff, 0x00, PAGE_SIZE);
148
149 last_p_partition_ID = last_p_unit_address = ~0UL;
150 INIT_LIST_HEAD(head);
151
152 do {
153 retval = hvcs_next_partner(unit_address, last_p_partition_ID,
154 last_p_unit_address, pi_buff);
155 if (retval) {
156 /*
157 * Don't indicate that we've failed if we have
158 * any list elements.
159 */
160 if (!list_empty(head))
161 return 0;
162 return retval;
163 }
164
165 last_p_partition_ID = pi_buff[0];
166 last_p_unit_address = pi_buff[1];
167
168 /* This indicates that there are no further partners */
169 if (last_p_partition_ID == ~0UL
170 && last_p_unit_address == ~0UL)
171 break;
172
173 /* This is a very small struct and will be freed soon in
174 * hvcs_free_partner_info(). */
175 next_partner_info = kmalloc(sizeof(struct hvcs_partner_info),
176 GFP_ATOMIC);
177
178 if (!next_partner_info) {
179 printk(KERN_WARNING "HVCONSOLE: kmalloc() failed to"
180 " allocate partner info struct.\n");
181 hvcs_free_partner_info(head);
182 return -ENOMEM;
183 }
184
185 next_partner_info->unit_address
186 = (unsigned int)last_p_unit_address;
187 next_partner_info->partition_ID
188 = (unsigned int)last_p_partition_ID;
189
190 /* copy the Null-term char too */
191 strncpy(&next_partner_info->location_code[0],
192 (char *)&pi_buff[2],
193 strlen((char *)&pi_buff[2]) + 1);
194
195 list_add_tail(&(next_partner_info->node), head);
196 next_partner_info = NULL;
197
198 } while (more);
199
200 return 0;
201}
202EXPORT_SYMBOL(hvcs_get_partner_info);
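
The calling convention documented above (caller-supplied page buffer, paired
free of the same list head) looks roughly like this in use; unit_address is
an assumed input and error handling is abbreviated:

	unsigned long *pi_buff = (unsigned long *)__get_free_page(GFP_KERNEL);
	LIST_HEAD(head);

	if (pi_buff && !hvcs_get_partner_info(unit_address, &head, pi_buff)) {
		struct hvcs_partner_info *pi;

		list_for_each_entry(pi, &head, node)
			printk(KERN_INFO "partner %x:%x at %s\n",
				pi->partition_ID, pi->unit_address,
				pi->location_code);
		hvcs_free_partner_info(&head);	/* pair with the SAME head */
	}
	free_page((unsigned long)pi_buff);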
203
204/**
205 * hvcs_register_connection - establish a connection between this vty-server and
206 * a vty.
207 * @unit_address: The unit address of the vty-server adapter that is to
208 * establish a connection.
209 * @p_partition_ID: The partition ID of the vty adapter that is to be connected.
210 * @p_unit_address: The unit address of the vty adapter to which the vty-server
211 * is to be connected.
212 *
213 * If this function is called once and -EINVAL is returned it may
214 * indicate that the partner info needs to be refreshed for the
215 * target unit address at which point the caller must invoke
216 * hvcs_get_partner_info() and then call this function again. If,
217 * for a second time, -EINVAL is returned then it indicates that
218 * there is probably already a partner connection registered to a
219 * different vty-server adapter. It is also possible that a second
220 * -EINVAL may indicate that one of the parms is not valid, for
221 * instance if the link was removed between the vty-server adapter
222 * and the vty adapter that you are trying to open. Don't shoot the
223 * messenger. Firmware implemented it this way.
224 */
225int hvcs_register_connection( uint32_t unit_address,
226 uint32_t p_partition_ID, uint32_t p_unit_address)
227{
228 long retval;
229 retval = plpar_hcall_norets(H_REGISTER_VTERM, unit_address,
230 p_partition_ID, p_unit_address);
231 return hvcs_convert(retval);
232}
233EXPORT_SYMBOL(hvcs_register_connection);
234
235/**
236 * hvcs_free_connection - free the connection between a vty-server and vty
237 * @unit_address: The unit address of the vty-server that is to have its
238 * connection severed.
239 *
240 * This function is used to free the partner connection between a vty-server
241 * adapter and a vty adapter.
242 *
243 * If -EBUSY is returned continue to call this function until 0 is returned.
244 */
245int hvcs_free_connection(uint32_t unit_address)
246{
247 long retval;
248 retval = plpar_hcall_norets(H_FREE_VTERM, unit_address);
249 return hvcs_convert(retval);
250}
251EXPORT_SYMBOL(hvcs_free_connection);
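
A sketch of the retry convention described in the comment above; the msleep
interval is an arbitrary choice:

	/* Keep asking firmware to sever the connection until it stops
	 * reporting busy. */
	while (hvcs_free_connection(unit_address) == -EBUSY)
		msleep(10);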
diff --git a/arch/ppc64/kernel/iomap.c b/arch/ppc64/kernel/iomap.c
deleted file mode 100644
index 6160c8dbb7c5..000000000000
--- a/arch/ppc64/kernel/iomap.c
+++ /dev/null
@@ -1,146 +0,0 @@
1/*
2 * arch/ppc64/kernel/iomap.c
3 *
4 * ppc64 "iomap" interface implementation.
5 *
6 * (C) Copyright 2004 Linus Torvalds
7 */
8#include <linux/init.h>
9#include <linux/pci.h>
10#include <linux/mm.h>
11#include <asm/io.h>
12
13/*
14 * Here comes the ppc64 implementation of the IOMAP
15 * interfaces.
16 */
17unsigned int fastcall ioread8(void __iomem *addr)
18{
19 return readb(addr);
20}
21unsigned int fastcall ioread16(void __iomem *addr)
22{
23 return readw(addr);
24}
25unsigned int fastcall ioread16be(void __iomem *addr)
26{
27 return in_be16(addr);
28}
29unsigned int fastcall ioread32(void __iomem *addr)
30{
31 return readl(addr);
32}
33unsigned int fastcall ioread32be(void __iomem *addr)
34{
35 return in_be32(addr);
36}
37EXPORT_SYMBOL(ioread8);
38EXPORT_SYMBOL(ioread16);
39EXPORT_SYMBOL(ioread16be);
40EXPORT_SYMBOL(ioread32);
41EXPORT_SYMBOL(ioread32be);
42
43void fastcall iowrite8(u8 val, void __iomem *addr)
44{
45 writeb(val, addr);
46}
47void fastcall iowrite16(u16 val, void __iomem *addr)
48{
49 writew(val, addr);
50}
51void fastcall iowrite16be(u16 val, void __iomem *addr)
52{
53 out_be16(addr, val);
54}
55void fastcall iowrite32(u32 val, void __iomem *addr)
56{
57 writel(val, addr);
58}
59void fastcall iowrite32be(u32 val, void __iomem *addr)
60{
61 out_be32(addr, val);
62}
63EXPORT_SYMBOL(iowrite8);
64EXPORT_SYMBOL(iowrite16);
65EXPORT_SYMBOL(iowrite16be);
66EXPORT_SYMBOL(iowrite32);
67EXPORT_SYMBOL(iowrite32be);
68
69/*
70 * These are the "repeat read/write" functions. Note the
71 * non-CPU byte order. We do things in "IO byteorder"
72 * here.
73 *
74 * FIXME! We could make these do EEH handling if we really
75 * wanted. Not clear if we do.
76 */
77void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
78{
79 _insb((u8 __iomem *) addr, dst, count);
80}
81void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
82{
83 _insw_ns((u16 __iomem *) addr, dst, count);
84}
85void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
86{
87 _insl_ns((u32 __iomem *) addr, dst, count);
88}
89EXPORT_SYMBOL(ioread8_rep);
90EXPORT_SYMBOL(ioread16_rep);
91EXPORT_SYMBOL(ioread32_rep);
92
93void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
94{
95 _outsb((u8 __iomem *) addr, src, count);
96}
97void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
98{
99 _outsw_ns((u16 __iomem *) addr, src, count);
100}
101void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
102{
103 _outsl_ns((u32 __iomem *) addr, src, count);
104}
105EXPORT_SYMBOL(iowrite8_rep);
106EXPORT_SYMBOL(iowrite16_rep);
107EXPORT_SYMBOL(iowrite32_rep);
108
109void __iomem *ioport_map(unsigned long port, unsigned int len)
110{
111 if (!_IO_IS_VALID(port))
112 return NULL;
113 return (void __iomem *) (port+pci_io_base);
114}
115
116void ioport_unmap(void __iomem *addr)
117{
118 /* Nothing to do */
119}
120EXPORT_SYMBOL(ioport_map);
121EXPORT_SYMBOL(ioport_unmap);
122
123void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
124{
125 unsigned long start = pci_resource_start(dev, bar);
126 unsigned long len = pci_resource_len(dev, bar);
127 unsigned long flags = pci_resource_flags(dev, bar);
128
129 if (!len)
130 return NULL;
131 if (max && len > max)
132 len = max;
133 if (flags & IORESOURCE_IO)
134 return ioport_map(start, len);
135 if (flags & IORESOURCE_MEM)
136 return ioremap(start, len);
137 /* What? */
138 return NULL;
139}
140
141void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
142{
143 /* Nothing to do */
144}
145EXPORT_SYMBOL(pci_iomap);
146EXPORT_SYMBOL(pci_iounmap);
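
Typical driver-side use of the iomap interfaces above, as a sketch; pdev,
BAR 0 and the register offset are assumptions:

	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* whole BAR 0 */

	if (regs) {
		u32 status = ioread32(regs + 0x10);	/* hypothetical reg */
		iowrite32(status | 0x1, regs + 0x10);
		pci_iounmap(pdev, regs);
	}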
diff --git a/arch/ppc64/kernel/iommu.c b/arch/ppc64/kernel/iommu.c
deleted file mode 100644
index 4d9b4388918b..000000000000
--- a/arch/ppc64/kernel/iommu.c
+++ /dev/null
@@ -1,572 +0,0 @@
1/*
2 * arch/ppc64/kernel/iommu.c
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 *
5 * Rewrite, cleanup, new allocation schemes, virtual merging:
6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
7 * and Ben. Herrenschmidt, IBM Corporation
8 *
9 * Dynamic DMA mapping support, bus-independent parts.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26
27#include <linux/config.h>
28#include <linux/init.h>
29#include <linux/types.h>
30#include <linux/slab.h>
31#include <linux/mm.h>
32#include <linux/spinlock.h>
33#include <linux/string.h>
34#include <linux/dma-mapping.h>
35#include <linux/init.h>
36#include <linux/bitops.h>
37#include <asm/io.h>
38#include <asm/prom.h>
39#include <asm/iommu.h>
40#include <asm/pci-bridge.h>
41#include <asm/machdep.h>
42
43#define DBG(...)
44
45#ifdef CONFIG_IOMMU_VMERGE
46static int novmerge = 0;
47#else
48static int novmerge = 1;
49#endif
50
51static int __init setup_iommu(char *str)
52{
53 if (!strcmp(str, "novmerge"))
54 novmerge = 1;
55 else if (!strcmp(str, "vmerge"))
56 novmerge = 0;
57 return 1;
58}
59
60__setup("iommu=", setup_iommu);
61
62static unsigned long iommu_range_alloc(struct iommu_table *tbl,
63 unsigned long npages,
64 unsigned long *handle,
65 unsigned int align_order)
66{
67 unsigned long n, end, i, start;
68 unsigned long limit;
69 int largealloc = npages > 15;
70 int pass = 0;
71 unsigned long align_mask;
72
73 align_mask = 0xffffffffffffffffl >> (64 - align_order);
74
75 /* This allocator was derived from x86_64's bit string search */
76
77 /* Sanity check */
78 if (unlikely(npages == 0)) {
79 if (printk_ratelimit())
80 WARN_ON(1);
81 return DMA_ERROR_CODE;
82 }
83
84 if (handle && *handle)
85 start = *handle;
86 else
87 start = largealloc ? tbl->it_largehint : tbl->it_hint;
88
89 /* Use only half of the table for small allocs (15 pages or less) */
90 limit = largealloc ? tbl->it_size : tbl->it_halfpoint;
91
92 if (largealloc && start < tbl->it_halfpoint)
93 start = tbl->it_halfpoint;
94
95 /* The case below can happen if we have a small segment appended
96 * to a large one, or when the previous alloc was at the very end of
97 * the available space. If so, go back to the initial start.
98 */
99 if (start >= limit)
100 start = largealloc ? tbl->it_largehint : tbl->it_hint;
101
102 again:
103
104 n = find_next_zero_bit(tbl->it_map, limit, start);
105
106 /* Align allocation */
107 n = (n + align_mask) & ~align_mask;
108
109 end = n + npages;
110
111 if (unlikely(end >= limit)) {
112 if (likely(pass < 2)) {
113 /* First failure, just rescan the half of the table.
114 * Second failure, rescan the other half of the table.
115 */
116 start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
117 limit = pass ? tbl->it_size : limit;
118 pass++;
119 goto again;
120 } else {
121 /* Third failure, give up */
122 return DMA_ERROR_CODE;
123 }
124 }
125
126 for (i = n; i < end; i++)
127 if (test_bit(i, tbl->it_map)) {
128 start = i+1;
129 goto again;
130 }
131
132 for (i = n; i < end; i++)
133 __set_bit(i, tbl->it_map);
134
135 /* Bump the hint to a new block for small allocs. */
136 if (largealloc) {
137 /* Don't bump to new block to avoid fragmentation */
138 tbl->it_largehint = end;
139 } else {
140 /* Overflow will be taken care of at the next allocation */
141 tbl->it_hint = (end + tbl->it_blocksize - 1) &
142 ~(tbl->it_blocksize - 1);
143 }
144
145 /* Update handle for SG allocations */
146 if (handle)
147 *handle = end;
148
149 return n;
150}
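
The "(n + align_mask) & ~align_mask" step above is the usual power-of-two
round-up; the mask computed from align_order is equivalent to
(1UL << align_order) - 1. A tiny illustration with made-up values:

	unsigned long align_order = 2;			/* 4-entry alignment */
	unsigned long mask = (1UL << align_order) - 1;	/* == 0x3 */
	unsigned long n = (5 + mask) & ~mask;		/* 5 rounds up to 8 */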
151
152static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
153 unsigned int npages, enum dma_data_direction direction,
154 unsigned int align_order)
155{
156 unsigned long entry, flags;
157 dma_addr_t ret = DMA_ERROR_CODE;
158
159 spin_lock_irqsave(&(tbl->it_lock), flags);
160
161 entry = iommu_range_alloc(tbl, npages, NULL, align_order);
162
163 if (unlikely(entry == DMA_ERROR_CODE)) {
164 spin_unlock_irqrestore(&(tbl->it_lock), flags);
165 return DMA_ERROR_CODE;
166 }
167
168 entry += tbl->it_offset; /* Offset into real TCE table */
169 ret = entry << PAGE_SHIFT; /* Set the return dma address */
170
171 /* Put the TCEs in the HW table */
172 ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
173 direction);
174
175
176 /* Flush/invalidate TLB caches if necessary */
177 if (ppc_md.tce_flush)
178 ppc_md.tce_flush(tbl);
179
180 spin_unlock_irqrestore(&(tbl->it_lock), flags);
181
182 /* Make sure updates are seen by hardware */
183 mb();
184
185 return ret;
186}
187
188static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
189 unsigned int npages)
190{
191 unsigned long entry, free_entry;
192 unsigned long i;
193
194 entry = dma_addr >> PAGE_SHIFT;
195 free_entry = entry - tbl->it_offset;
196
197 if (((free_entry + npages) > tbl->it_size) ||
198 (entry < tbl->it_offset)) {
199 if (printk_ratelimit()) {
200 printk(KERN_INFO "iommu_free: invalid entry\n");
201 printk(KERN_INFO "\tentry = 0x%lx\n", entry);
202 printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
203 printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
204 printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
205 printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
206 printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
207 printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
208 WARN_ON(1);
209 }
210 return;
211 }
212
213 ppc_md.tce_free(tbl, entry, npages);
214
215 for (i = 0; i < npages; i++)
216 __clear_bit(free_entry+i, tbl->it_map);
217}
218
219static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
220 unsigned int npages)
221{
222 unsigned long flags;
223
224 spin_lock_irqsave(&(tbl->it_lock), flags);
225
226 __iommu_free(tbl, dma_addr, npages);
227
228 /* Make sure TLB cache is flushed if the HW needs it. We do
229 * not do an mb() here on purpose, it is not needed on any of
230 * the current platforms.
231 */
232 if (ppc_md.tce_flush)
233 ppc_md.tce_flush(tbl);
234
235 spin_unlock_irqrestore(&(tbl->it_lock), flags);
236}
237
238int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
239 struct scatterlist *sglist, int nelems,
240 enum dma_data_direction direction)
241{
242 dma_addr_t dma_next = 0, dma_addr;
243 unsigned long flags;
244 struct scatterlist *s, *outs, *segstart;
245 int outcount, incount;
246 unsigned long handle;
247
248 BUG_ON(direction == DMA_NONE);
249
250 if ((nelems == 0) || !tbl)
251 return 0;
252
253 outs = s = segstart = &sglist[0];
254 outcount = 1;
255 incount = nelems;
256 handle = 0;
257
258 /* Init first segment length for backout at failure */
259 outs->dma_length = 0;
260
261 DBG("mapping %d elements:\n", nelems);
262
263 spin_lock_irqsave(&(tbl->it_lock), flags);
264
265 for (s = outs; nelems; nelems--, s++) {
266 unsigned long vaddr, npages, entry, slen;
267
268 slen = s->length;
269 /* Sanity check */
270 if (slen == 0) {
271 dma_next = 0;
272 continue;
273 }
274 /* Allocate iommu entries for that segment */
275 vaddr = (unsigned long)page_address(s->page) + s->offset;
276 npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
277 npages >>= PAGE_SHIFT;
278 entry = iommu_range_alloc(tbl, npages, &handle, 0);
279
280 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
281
282 /* Handle failure */
283 if (unlikely(entry == DMA_ERROR_CODE)) {
284 if (printk_ratelimit())
285 printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
286 " npages %lx\n", tbl, vaddr, npages);
287 goto failure;
288 }
289
290 /* Convert entry to a dma_addr_t */
291 entry += tbl->it_offset;
292 dma_addr = entry << PAGE_SHIFT;
293 dma_addr |= s->offset;
294
295 DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n",
296 npages, entry, dma_addr);
297
298 /* Insert into HW table */
299 ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
300
301 /* If we are in an open segment, try merging */
302 if (segstart != s) {
303 DBG(" - trying merge...\n");
304 /* We cannot merge if:
305 * - allocated dma_addr isn't contiguous to previous allocation
306 */
307 if (novmerge || (dma_addr != dma_next)) {
308 /* Can't merge: create a new segment */
309 segstart = s;
310 outcount++; outs++;
311 DBG(" can't merge, new segment.\n");
312 } else {
313 outs->dma_length += s->length;
314 DBG(" merged, new len: %lx\n", outs->dma_length);
315 }
316 }
317
318 if (segstart == s) {
319 /* This is a new segment, fill entries */
320 DBG(" - filling new segment.\n");
321 outs->dma_address = dma_addr;
322 outs->dma_length = slen;
323 }
324
325 /* Calculate next page pointer for contiguous check */
326 dma_next = dma_addr + slen;
327
328 DBG(" - dma next is: %lx\n", dma_next);
329 }
330
331 /* Flush/invalidate TLB caches if necessary */
332 if (ppc_md.tce_flush)
333 ppc_md.tce_flush(tbl);
334
335 spin_unlock_irqrestore(&(tbl->it_lock), flags);
336
337 /* Make sure updates are seen by hardware */
338 mb();
339
340 DBG("mapped %d elements:\n", outcount);
341
342 /* For the sake of iommu_unmap_sg, we clear out the length in the
343 * next entry of the sglist if we didn't fill the list completely
344 */
345 if (outcount < incount) {
346 outs++;
347 outs->dma_address = DMA_ERROR_CODE;
348 outs->dma_length = 0;
349 }
350 return outcount;
351
352 failure:
353 for (s = &sglist[0]; s <= outs; s++) {
354 if (s->dma_length != 0) {
355 unsigned long vaddr, npages;
356
357 vaddr = s->dma_address & PAGE_MASK;
358 npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
359 >> PAGE_SHIFT;
360 __iommu_free(tbl, vaddr, npages);
361 }
362 }
363 spin_unlock_irqrestore(&(tbl->it_lock), flags);
364 return 0;
365}
366
367
368void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
369 int nelems, enum dma_data_direction direction)
370{
371 unsigned long flags;
372
373 BUG_ON(direction == DMA_NONE);
374
375 if (!tbl)
376 return;
377
378 spin_lock_irqsave(&(tbl->it_lock), flags);
379
380 while (nelems--) {
381 unsigned int npages;
382 dma_addr_t dma_handle = sglist->dma_address;
383
384 if (sglist->dma_length == 0)
385 break;
386 npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
387 - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
388 __iommu_free(tbl, dma_handle, npages);
389 sglist++;
390 }
391
392 /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
393 * do not do an mb() here, the affected platforms do not need it
394 * when freeing.
395 */
396 if (ppc_md.tce_flush)
397 ppc_md.tce_flush(tbl);
398
399 spin_unlock_irqrestore(&(tbl->it_lock), flags);
400}
401
402/*
403 * Build an iommu_table structure. This contains a bitmap which
404 * is used to manage allocation of the TCE space.
405 */
406struct iommu_table *iommu_init_table(struct iommu_table *tbl)
407{
408 unsigned long sz;
409 static int welcomed = 0;
410
411 /* Set aside 1/4 of the table for large allocations. */
412 tbl->it_halfpoint = tbl->it_size * 3 / 4;
413
414 /* number of bytes needed for the bitmap */
415 sz = (tbl->it_size + 7) >> 3;
416
417 tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
418 if (!tbl->it_map)
419 panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
420
421 memset(tbl->it_map, 0, sz);
422
423 tbl->it_hint = 0;
424 tbl->it_largehint = tbl->it_halfpoint;
425 spin_lock_init(&tbl->it_lock);
426
427 /* Clear the hardware table in case firmware left allocations in it */
428 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
429
430 if (!welcomed) {
431 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
432 novmerge ? "disabled" : "enabled");
433 welcomed = 1;
434 }
435
436 return tbl;
437}
438
439void iommu_free_table(struct device_node *dn)
440{
441 struct pci_dn *pdn = dn->data;
442 struct iommu_table *tbl = pdn->iommu_table;
443 unsigned long bitmap_sz, i;
444 unsigned int order;
445
446 if (!tbl || !tbl->it_map) {
447 printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
448 dn->full_name);
449 return;
450 }
451
452 /* verify that table contains no entries */
453 /* it_size is in entries, and we're examining 64 at a time */
454 for (i = 0; i < (tbl->it_size/64); i++) {
455 if (tbl->it_map[i] != 0) {
456 printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
457 __FUNCTION__, dn->full_name);
458 break;
459 }
460 }
461
462 /* calculate bitmap size in bytes */
463 bitmap_sz = (tbl->it_size + 7) / 8;
464
465 /* free bitmap */
466 order = get_order(bitmap_sz);
467 free_pages((unsigned long) tbl->it_map, order);
468
469 /* free table */
470 kfree(tbl);
471}
472
473/* Creates TCEs for a user-provided buffer. The user buffer must be
474 * contiguous real kernel storage (not vmalloc). The address of the buffer
475 * passed here is the kernel (virtual) address of the buffer. The buffer
476 * need not be page aligned, the dma_addr_t returned will point to the same
477 * byte within the page as vaddr.
478 */
479dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
480 size_t size, enum dma_data_direction direction)
481{
482 dma_addr_t dma_handle = DMA_ERROR_CODE;
483 unsigned long uaddr;
484 unsigned int npages;
485
486 BUG_ON(direction == DMA_NONE);
487
488 uaddr = (unsigned long)vaddr;
489 npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
490 npages >>= PAGE_SHIFT;
491
492 if (tbl) {
493 dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
494 if (dma_handle == DMA_ERROR_CODE) {
495 if (printk_ratelimit()) {
496 printk(KERN_INFO "iommu_alloc failed, "
497 "tbl %p vaddr %p npages %d\n",
498 tbl, vaddr, npages);
499 }
500 } else
501 dma_handle |= (uaddr & ~PAGE_MASK);
502 }
503
504 return dma_handle;
505}
506
507void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
508 size_t size, enum dma_data_direction direction)
509{
510 BUG_ON(direction == DMA_NONE);
511
512 if (tbl)
513 iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
514 (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
515}
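
A sketch pairing the two calls above; tbl, buf and len are assumed to come
from the caller:

	dma_addr_t handle = iommu_map_single(tbl, buf, len, DMA_TO_DEVICE);

	if (handle != DMA_ERROR_CODE) {
		/* ... program the device with handle and let it DMA ... */
		iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
	}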
516
517/* Allocates a contiguous real buffer and creates mappings over it.
518 * Returns the virtual address of the buffer and sets dma_handle
519 * to the dma address (mapping) of the first page.
520 */
521void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
522 dma_addr_t *dma_handle, gfp_t flag)
523{
524 void *ret = NULL;
525 dma_addr_t mapping;
526 unsigned int npages, order;
527
528 size = PAGE_ALIGN(size);
529 npages = size >> PAGE_SHIFT;
530 order = get_order(size);
531
532 /*
533 * Client asked for way too much space. This is checked later
534 * anyway. It is easier to debug here for the drivers than in
535 * the tce tables.
536 */
537 if (order >= IOMAP_MAX_ORDER) {
538 printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
539 return NULL;
540 }
541
542 if (!tbl)
543 return NULL;
544
545 /* Alloc enough pages (and possibly more) */
546 ret = (void *)__get_free_pages(flag, order);
547 if (!ret)
548 return NULL;
549 memset(ret, 0, size);
550
551 /* Set up tces to cover the allocated range */
552 mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
553 if (mapping == DMA_ERROR_CODE) {
554 free_pages((unsigned long)ret, order);
555 ret = NULL;
556 } else
557 *dma_handle = mapping;
558 return ret;
559}
560
561void iommu_free_coherent(struct iommu_table *tbl, size_t size,
562 void *vaddr, dma_addr_t dma_handle)
563{
564 unsigned int npages;
565
566 if (tbl) {
567 size = PAGE_ALIGN(size);
568 npages = size >> PAGE_SHIFT;
569 iommu_free(tbl, dma_handle, npages);
570 free_pages((unsigned long)vaddr, get_order(size));
571 }
572}
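
And the matching pattern for the coherent allocator above (tbl assumed, one
page requested):

	dma_addr_t dma;
	void *cpu_addr = iommu_alloc_coherent(tbl, PAGE_SIZE, &dma, GFP_KERNEL);

	if (cpu_addr) {
		/* ... share cpu_addr (CPU side) and dma (device side) ... */
		iommu_free_coherent(tbl, PAGE_SIZE, cpu_addr, dma);
	}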
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
deleted file mode 100644
index 511af54e6230..000000000000
--- a/arch/ppc64/kernel/kprobes.c
+++ /dev/null
@@ -1,459 +0,0 @@
1/*
2 * Kernel Probes (KProbes)
3 * arch/ppc64/kernel/kprobes.c
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * Copyright (C) IBM Corporation, 2002, 2004
20 *
21 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
22 * Probes initial implementation ( includes contributions from
23 * Rusty Russell).
24 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
25 * interface to access function arguments.
26 * 2004-Nov Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
27 * for PPC64
28 */
29
30#include <linux/config.h>
31#include <linux/kprobes.h>
32#include <linux/ptrace.h>
33#include <linux/preempt.h>
34#include <asm/cacheflush.h>
35#include <asm/kdebug.h>
36#include <asm/sstep.h>
37
38static DECLARE_MUTEX(kprobe_mutex);
39DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
40DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
41
42int __kprobes arch_prepare_kprobe(struct kprobe *p)
43{
44 int ret = 0;
45 kprobe_opcode_t insn = *p->addr;
46
47 if ((unsigned long)p->addr & 0x03) {
48 printk("Attempt to register kprobe at an unaligned address\n");
49 ret = -EINVAL;
50 } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
51 printk("Cannot register a kprobe on rfid or mtmsrd\n");
52 ret = -EINVAL;
53 }
54
55 /* insn must be on a special executable page on ppc64 */
56 if (!ret) {
57 down(&kprobe_mutex);
58 p->ainsn.insn = get_insn_slot();
59 up(&kprobe_mutex);
60 if (!p->ainsn.insn)
61 ret = -ENOMEM;
62 }
63 return ret;
64}
65
66void __kprobes arch_copy_kprobe(struct kprobe *p)
67{
68 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
69 p->opcode = *p->addr;
70}
71
72void __kprobes arch_arm_kprobe(struct kprobe *p)
73{
74 *p->addr = BREAKPOINT_INSTRUCTION;
75 flush_icache_range((unsigned long) p->addr,
76 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
77}
78
79void __kprobes arch_disarm_kprobe(struct kprobe *p)
80{
81 *p->addr = p->opcode;
82 flush_icache_range((unsigned long) p->addr,
83 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
84}
85
86void __kprobes arch_remove_kprobe(struct kprobe *p)
87{
88 down(&kprobe_mutex);
89 free_insn_slot(p->ainsn.insn);
90 up(&kprobe_mutex);
91}
92
93static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
94{
95 kprobe_opcode_t insn = *p->ainsn.insn;
96
97 regs->msr |= MSR_SE;
98
99 /* single step inline if it is a trap variant */
100 if (is_trap(insn))
101 regs->nip = (unsigned long)p->addr;
102 else
103 regs->nip = (unsigned long)p->ainsn.insn;
104}
105
106static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
107{
108 kcb->prev_kprobe.kp = kprobe_running();
109 kcb->prev_kprobe.status = kcb->kprobe_status;
110 kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
111}
112
113static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
114{
115 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
116 kcb->kprobe_status = kcb->prev_kprobe.status;
117 kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
118}
119
120static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
121 struct kprobe_ctlblk *kcb)
122{
123 __get_cpu_var(current_kprobe) = p;
124 kcb->kprobe_saved_msr = regs->msr;
125}
126
127/* Called with kretprobe_lock held */
128void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
129 struct pt_regs *regs)
130{
131 struct kretprobe_instance *ri;
132
133 if ((ri = get_free_rp_inst(rp)) != NULL) {
134 ri->rp = rp;
135 ri->task = current;
136 ri->ret_addr = (kprobe_opcode_t *)regs->link;
137
138 /* Replace the return addr with trampoline addr */
139 regs->link = (unsigned long)kretprobe_trampoline;
140 add_rp_inst(ri);
141 } else {
142 rp->nmissed++;
143 }
144}
145
146static inline int kprobe_handler(struct pt_regs *regs)
147{
148 struct kprobe *p;
149 int ret = 0;
150 unsigned int *addr = (unsigned int *)regs->nip;
151 struct kprobe_ctlblk *kcb;
152
153 /*
154 * We don't want to be preempted for the entire
155 * duration of kprobe processing
156 */
157 preempt_disable();
158 kcb = get_kprobe_ctlblk();
159
160 /* Check we're not actually recursing */
161 if (kprobe_running()) {
162 p = get_kprobe(addr);
163 if (p) {
164 kprobe_opcode_t insn = *p->ainsn.insn;
165 if (kcb->kprobe_status == KPROBE_HIT_SS &&
166 is_trap(insn)) {
167 regs->msr &= ~MSR_SE;
168 regs->msr |= kcb->kprobe_saved_msr;
169 goto no_kprobe;
170 }
171 /* We have reentered the kprobe_handler(), since
172 * another probe was hit while within the handler.
173 * Here we save the original kprobes variables and
174 * just single-step the instruction of the new probe
175 * without calling any user handlers.
176 */
177 save_previous_kprobe(kcb);
178 set_current_kprobe(p, regs, kcb);
179 kcb->kprobe_saved_msr = regs->msr;
180 p->nmissed++;
181 prepare_singlestep(p, regs);
182 kcb->kprobe_status = KPROBE_REENTER;
183 return 1;
184 } else {
185 p = __get_cpu_var(current_kprobe);
186 if (p->break_handler && p->break_handler(p, regs)) {
187 goto ss_probe;
188 }
189 }
190 goto no_kprobe;
191 }
192
193 p = get_kprobe(addr);
194 if (!p) {
195 if (*addr != BREAKPOINT_INSTRUCTION) {
196 /*
197 * PowerPC has multiple variants of the "trap"
198 * instruction. If the current instruction is a
199 * trap variant, it could belong to someone else
200 */
201 kprobe_opcode_t cur_insn = *addr;
202 if (is_trap(cur_insn))
203 goto no_kprobe;
204 /*
205 * The breakpoint instruction was removed right
206 * after we hit it. Another cpu has removed
207 * either a probepoint or a debugger breakpoint
208 * at this address. In either case, no further
209 * handling of this interrupt is appropriate.
210 */
211 ret = 1;
212 }
213 /* Not one of ours: let kernel handle it */
214 goto no_kprobe;
215 }
216
217 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
218 set_current_kprobe(p, regs, kcb);
219 if (p->pre_handler && p->pre_handler(p, regs))
220 /* handler has already set things up, so skip ss setup */
221 return 1;
222
223ss_probe:
224 prepare_singlestep(p, regs);
225 kcb->kprobe_status = KPROBE_HIT_SS;
226 return 1;
227
228no_kprobe:
229 preempt_enable_no_resched();
230 return ret;
231}
232
233/*
234 * Function return probe trampoline:
235 * - init_kprobes() establishes a probepoint here
236 * - When the probed function returns, this probe
237 * causes the handlers to fire
238 */
239void kretprobe_trampoline_holder(void)
240{
241 asm volatile(".global kretprobe_trampoline\n"
242 "kretprobe_trampoline:\n"
243 "nop\n");
244}
245
246/*
247 * Called when the probe at kretprobe trampoline is hit
248 */
249int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
250{
251 struct kretprobe_instance *ri = NULL;
252 struct hlist_head *head;
253 struct hlist_node *node, *tmp;
254 unsigned long flags, orig_ret_address = 0;
255 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
256
257 spin_lock_irqsave(&kretprobe_lock, flags);
258 head = kretprobe_inst_table_head(current);
259
260 /*
261 * It is possible to have multiple instances associated with a given
262 * task, either because multiple functions in the call path
263 * have a return probe installed on them, and/or more than one
264 * return probe was registered for a target function.
265 *
266 * We can handle this because:
267 * - instances are always inserted at the head of the list
268 * - when multiple return probes are registered for the same
269 * function, the first instance's ret_addr will point to the
270 * real return address, and all the rest will point to
271 * kretprobe_trampoline
272 */
273 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
274 if (ri->task != current)
275 /* another task is sharing our hash bucket */
276 continue;
277
278 if (ri->rp && ri->rp->handler)
279 ri->rp->handler(ri, regs);
280
281 orig_ret_address = (unsigned long)ri->ret_addr;
282 recycle_rp_inst(ri);
283
284 if (orig_ret_address != trampoline_address)
285 /*
286 * This is the real return address. Any other
287 * instances associated with this task are for
288 * other calls deeper on the call stack
289 */
290 break;
291 }
292
293 BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
294 regs->nip = orig_ret_address;
295
296 reset_current_kprobe();
297 spin_unlock_irqrestore(&kretprobe_lock, flags);
298 preempt_enable_no_resched();
299
300 /*
301 * By returning a non-zero value, we are telling
302 * kprobe_handler() that we don't want the post_handler
303 * to run (and have re-enabled preemption)
304 */
305 return 1;
306}
307
308/*
309 * Called after single-stepping. p->addr is the address of the
310 * instruction whose first byte has been replaced by the "breakpoint"
311 * instruction. To avoid the SMP problems that can occur when we
312 * temporarily put back the original opcode to single-step, we
313 * single-stepped a copy of the instruction. The address of this
314 * copy is p->ainsn.insn.
315 */
316static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
317{
318 int ret;
319 unsigned int insn = *p->ainsn.insn;
320
321 regs->nip = (unsigned long)p->addr;
322 ret = emulate_step(regs, insn);
323 if (ret == 0)
324 regs->nip = (unsigned long)p->addr + 4;
325}
326
327static inline int post_kprobe_handler(struct pt_regs *regs)
328{
329 struct kprobe *cur = kprobe_running();
330 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
331
332 if (!cur)
333 return 0;
334
335 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
336 kcb->kprobe_status = KPROBE_HIT_SSDONE;
337 cur->post_handler(cur, regs, 0);
338 }
339
340 resume_execution(cur, regs);
341 regs->msr |= kcb->kprobe_saved_msr;
342
343 /* Restore the original saved kprobes variables and continue. */
344 if (kcb->kprobe_status == KPROBE_REENTER) {
345 restore_previous_kprobe(kcb);
346 goto out;
347 }
348 reset_current_kprobe();
349out:
350 preempt_enable_no_resched();
351
352 /*
353 * if somebody else is singlestepping across a probe point, msr
354 * will have SE set, in which case, continue the remaining processing
355 * of do_debug, as if this is not a probe hit.
356 */
357 if (regs->msr & MSR_SE)
358 return 0;
359
360 return 1;
361}
362
363static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
364{
365 struct kprobe *cur = kprobe_running();
366 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
367
368 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
369 return 1;
370
371 if (kcb->kprobe_status & KPROBE_HIT_SS) {
372 resume_execution(cur, regs);
373 regs->msr &= ~MSR_SE;
374 regs->msr |= kcb->kprobe_saved_msr;
375
376 reset_current_kprobe();
377 preempt_enable_no_resched();
378 }
379 return 0;
380}
381
382/*
383 * Wrapper routine for handling exceptions.
384 */
385int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
386 unsigned long val, void *data)
387{
388 struct die_args *args = (struct die_args *)data;
389 int ret = NOTIFY_DONE;
390
391 switch (val) {
392 case DIE_BPT:
393 if (kprobe_handler(args->regs))
394 ret = NOTIFY_STOP;
395 break;
396 case DIE_SSTEP:
397 if (post_kprobe_handler(args->regs))
398 ret = NOTIFY_STOP;
399 break;
400 case DIE_PAGE_FAULT:
401 /* kprobe_running() needs smp_processor_id() */
402 preempt_disable();
403 if (kprobe_running() &&
404 kprobe_fault_handler(args->regs, args->trapnr))
405 ret = NOTIFY_STOP;
406 preempt_enable();
407 break;
408 default:
409 break;
410 }
411 return ret;
412}
413
414int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
415{
416 struct jprobe *jp = container_of(p, struct jprobe, kp);
417 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
418
419 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
420
421 /* setup return addr to the jprobe handler routine */
422 regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
423 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
424
425 return 1;
426}
427
428void __kprobes jprobe_return(void)
429{
430 asm volatile("trap" ::: "memory");
431}
432
433void __kprobes jprobe_return_end(void)
434{
435};
436
437int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
438{
439 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
440
441 /*
442 * FIXME - we should ideally be validating that we got here 'cos
443 * of the "trap" in jprobe_return() above, before restoring the
444 * saved regs...
445 */
446 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
447 preempt_enable_no_resched();
448 return 1;
449}
450
451static struct kprobe trampoline_p = {
452 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
453 .pre_handler = trampoline_probe_handler
454};
455
456int __init arch_init_kprobes(void)
457{
458 return register_kprobe(&trampoline_p);
459}
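
For context, a client of this port registers a probe roughly as follows; the
probed address and the handler body are hypothetical:

	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		printk(KERN_INFO "probe hit, nip = 0x%lx\n", regs->nip);
		return 0;	/* let the normal single-step path run */
	}

	static struct kprobe my_probe = {
		.pre_handler = my_pre_handler,
	};

	/* my_probe.addr = (kprobe_opcode_t *)probed_insn_addr;  (assumed)
	 * register_kprobe(&my_probe); ... unregister_kprobe(&my_probe); */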
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
deleted file mode 100644
index 97c51e452be7..000000000000
--- a/arch/ppc64/kernel/machine_kexec.c
+++ /dev/null
@@ -1,358 +0,0 @@
1/*
2 * machine_kexec.c - handle transition of Linux booting another kernel
3 *
4 * Copyright (C) 2004-2005, IBM Corp.
5 *
6 * Created by: Milton D Miller II
7 *
8 * This source code is licensed under the GNU General Public License,
9 * Version 2. See the file COPYING for more details.
10 */
11
12
13#include <linux/cpumask.h>
14#include <linux/kexec.h>
15#include <linux/smp.h>
16#include <linux/thread_info.h>
17#include <linux/errno.h>
18
19#include <asm/page.h>
20#include <asm/current.h>
21#include <asm/machdep.h>
22#include <asm/cacheflush.h>
23#include <asm/paca.h>
24#include <asm/mmu.h>
25#include <asm/sections.h> /* _end */
26#include <asm/prom.h>
27#include <asm/smp.h>
28
29#define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */
30
31/* Have this around till we move it into a crash-specific file */
32note_buf_t crash_notes[NR_CPUS];
33
34/* Dummy for now. Not sure whether we need a crash shutdown in here
35 * and what it would achieve. Leaving it in for now so the code compiles
36 * in the generic kexec environment.
37 */
38void machine_crash_shutdown(struct pt_regs *regs)
39{
40 /* do nothing right now */
41 /* smp_release_cpus() if we want smp on panic kernel */
42 /* cpu_irq_down to isolate us until we are ready */
43}
44
45int machine_kexec_prepare(struct kimage *image)
46{
47 int i;
48 unsigned long begin, end; /* limits of segment */
49 unsigned long low, high; /* limits of blocked memory range */
50 struct device_node *node;
51 unsigned long *basep;
52 unsigned int *sizep;
53
54 if (!ppc_md.hpte_clear_all)
55 return -ENOENT;
56
57 /*
58 * Since we use the kernel fault handlers and paging code to
59 * handle the virtual mode, we must make sure no destination
60 * overlaps kernel static data or bss.
61 */
62 for (i = 0; i < image->nr_segments; i++)
63 if (image->segment[i].mem < __pa(_end))
64 return -ETXTBSY;
65
66 /*
67 * For non-LPAR, we absolutely can not overwrite the mmu hash
68 * table, since we are still using the bolted entries in it to
69 * do the copy. Check that here.
70 *
71 * It is safe if the end is below the start of the blocked
72 * region (end <= low), or if the beginning is after the
73 * end of the blocked region (begin >= high). Use the
74 * boolean identity !(a || b) === (!a && !b).
75 */
76 if (htab_address) {
77 low = __pa(htab_address);
78 high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE;
79
80 for (i = 0; i < image->nr_segments; i++) {
81 begin = image->segment[i].mem;
82 end = begin + image->segment[i].memsz;
83
84 if ((begin < high) && (end > low))
85 return -ETXTBSY;
86 }
87 }
88
89 /* We also should not overwrite the tce tables */
90 for (node = of_find_node_by_type(NULL, "pci"); node != NULL;
91 node = of_find_node_by_type(node, "pci")) {
92 basep = (unsigned long *)get_property(node, "linux,tce-base",
93 NULL);
94 sizep = (unsigned int *)get_property(node, "linux,tce-size",
95 NULL);
96 if (basep == NULL || sizep == NULL)
97 continue;
98
99 low = *basep;
100 high = low + (*sizep);
101
102 for (i = 0; i < image->nr_segments; i++) {
103 begin = image->segment[i].mem;
104 end = begin + image->segment[i].memsz;
105
106 if ((begin < high) && (end > low))
107 return -ETXTBSY;
108 }
109 }
110
111 return 0;
112}
113
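Illustrative aside, not part of this patch: the overlap predicate used twice in machine_kexec_prepare(), spelled out as a helper. A segment [begin, end) misses a blocked range [low, high) iff end <= low or begin >= high; by the identity !(a || b) == (!a && !b) quoted in the comment, it overlaps iff begin < high and end > low.

static inline int ranges_overlap(unsigned long begin, unsigned long end,
				 unsigned long low, unsigned long high)
{
	/* overlap iff neither "entirely below" nor "entirely above" holds */
	return (begin < high) && (end > low);
}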
114void machine_kexec_cleanup(struct kimage *image)
115{
116 /* we do nothing in prepare that needs to be undone */
117}
118
119#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
120
121static void copy_segments(unsigned long ind)
122{
123 unsigned long entry;
124 unsigned long *ptr;
125 void *dest;
126 void *addr;
127
128 /*
129 * We rely on kexec_load to create lists that properly
130 * initialize these pointers before they are used.
131 * We will still crash if the list is wrong, but at least
132 * the compiler will be quiet.
133 */
134 ptr = NULL;
135 dest = NULL;
136
137 for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
138 addr = __va(entry & PAGE_MASK);
139
140 switch (entry & IND_FLAGS) {
141 case IND_DESTINATION:
142 dest = addr;
143 break;
144 case IND_INDIRECTION:
145 ptr = addr;
146 break;
147 case IND_SOURCE:
148 copy_page(dest, addr);
149 dest += PAGE_SIZE;
150 }
151 }
152}
153
154void kexec_copy_flush(struct kimage *image)
155{
156 long i, nr_segments = image->nr_segments;
157 struct kexec_segment ranges[KEXEC_SEGMENT_MAX];
158
159 /* save the ranges on the stack to efficiently flush the icache */
160 memcpy(ranges, image->segment, sizeof(ranges));
161
162 /*
163 * After this call we may not use anything allocated in dynamic
164 * memory, including *image.
165 *
166 * Only globals and the stack are allowed.
167 */
168 copy_segments(image->head);
169
170 /*
171 * we need to clear the icache for all dest pages at some point,
172 * including ones that were in place on the original copy
173 */
174 for (i = 0; i < nr_segments; i++)
175 flush_icache_range(ranges[i].mem + KERNELBASE,
176 ranges[i].mem + KERNELBASE +
177 ranges[i].memsz);
178}
179
180#ifdef CONFIG_SMP
181
182/* FIXME: we should schedule this function to be called on all cpus
183 * via an interrupt, but we would like to call it off irq level
184 * so that the interrupt controller is left clean.
185 */
186void kexec_smp_down(void *arg)
187{
188 if (ppc_md.kexec_cpu_down)
189 ppc_md.kexec_cpu_down(0, 1);
190
191 local_irq_disable();
192 kexec_smp_wait();
193 /* NOTREACHED */
194}
195
196static void kexec_prepare_cpus(void)
197{
198 int my_cpu, i, notified=-1;
199
200 smp_call_function(kexec_smp_down, NULL, 0, /* wait */0);
201 my_cpu = get_cpu();
202
203 /* check the other cpus are now down (via paca hw cpu id == -1) */
204 for (i=0; i < NR_CPUS; i++) {
205 if (i == my_cpu)
206 continue;
207
208 while (paca[i].hw_cpu_id != -1) {
209 barrier();
210 if (!cpu_possible(i)) {
211 printk("kexec: cpu %d hw_cpu_id %d is not"
212 " possible, ignoring\n",
213 i, paca[i].hw_cpu_id);
214 break;
215 }
216 if (!cpu_online(i)) {
217 /* Fixme: this can be spinning in
218 * pSeries_secondary_wait with a paca
219 * waiting for it to go online.
220 */
221 printk("kexec: cpu %d hw_cpu_id %d is not"
222 " online, ignoring\n",
223 i, paca[i].hw_cpu_id);
224 break;
225 }
226 if (i != notified) {
227 printk( "kexec: waiting for cpu %d (physical"
228 " %d) to go down\n",
229 i, paca[i].hw_cpu_id);
230 notified = i;
231 }
232 }
233 }
234
235 /* after we tell the others to go down */
236 if (ppc_md.kexec_cpu_down)
237 ppc_md.kexec_cpu_down(0, 0);
238
239 put_cpu();
240
241 local_irq_disable();
242}
243
244#else /* ! SMP */
245
246static void kexec_prepare_cpus(void)
247{
248 /*
249 * move the secondaries to us so that we can copy
250 * the new kernel 0-0x100 safely
251 *
252 * do this if kexec in setup.c ?
253 *
254 * We need to release the cpus if we are ever going from a
255 * UP to an SMP kernel.
256 */
257 smp_release_cpus();
258 if (ppc_md.kexec_cpu_down)
259 ppc_md.kexec_cpu_down(0, 0);
260 local_irq_disable();
261}
262
263#endif /* SMP */
264
265/*
266 * kexec thread structure and stack.
267 *
268 * We need to make sure that this is 16384-byte aligned due to the
269 * way process stacks are handled. It also must be statically allocated
270 * or allocated as part of the kimage, because everything else may be
271 * overwritten when we copy the kexec image. We piggyback on the
272 * "init_task" linker section here to statically allocate a stack.
273 *
274 * We could use a smaller stack if we don't care about anything using
275 * current, but that audit has not been performed.
276 */
277union thread_union kexec_stack
278 __attribute__((__section__(".data.init_task"))) = { };
279
280/* Our assembly helper, in kexec_stub.S */
281extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
282 void *image, void *control,
283 void (*clear_all)(void)) ATTRIB_NORET;
284
285/* too late to fail here */
286void machine_kexec(struct kimage *image)
287{
288
289 /* prepare control code if any */
290
291 /* shutdown other cpus into our wait loop and quiesce interrupts */
292 kexec_prepare_cpus();
293
294 /* switch to a statically allocated stack. Based on irq stack code.
295 * XXX: the task struct will likely be invalid once we do the copy!
296 */
297 kexec_stack.thread_info.task = current_thread_info()->task;
298 kexec_stack.thread_info.flags = 0;
299
300 /* Some things are best done in assembly. Finding globals with
301 * a toc is easier in C, so pass in what we can.
302 */
303 kexec_sequence(&kexec_stack, image->start, image,
304 page_address(image->control_code_page),
305 ppc_md.hpte_clear_all);
306 /* NOTREACHED */
307}
308
309/* Values we need to export to the second kernel via the device tree. */
310static unsigned long htab_base, htab_size, kernel_end;
311
312static struct property htab_base_prop = {
313 .name = "linux,htab-base",
314 .length = sizeof(unsigned long),
315 .value = (unsigned char *)&htab_base,
316};
317
318static struct property htab_size_prop = {
319 .name = "linux,htab-size",
320 .length = sizeof(unsigned long),
321 .value = (unsigned char *)&htab_size,
322};
323
324static struct property kernel_end_prop = {
325 .name = "linux,kernel-end",
326 .length = sizeof(unsigned long),
327 .value = (unsigned char *)&kernel_end,
328};
329
330static void __init export_htab_values(void)
331{
332 struct device_node *node;
333
334 node = of_find_node_by_path("/chosen");
335 if (!node)
336 return;
337
338 kernel_end = __pa(_end);
339 prom_add_property(node, &kernel_end_prop);
340
341 /* On machines with no htab, htab_address is NULL */
342 if (NULL == htab_address)
343 goto out;
344
345 htab_base = __pa(htab_address);
346 prom_add_property(node, &htab_base_prop);
347
348 htab_size = 1UL << ppc64_pft_size;
349 prom_add_property(node, &htab_size_prop);
350
351 out:
352 of_node_put(node);
353}
354
355void __init kexec_setup(void)
356{
357 export_htab_values();
358}
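Illustrative aside, not part of this patch: a hedged sketch of how a consumer (for instance the kexec'd kernel) might read back one of the properties exported above, using the same device-tree accessors that appear elsewhere in this patch. The function name is made up.

static unsigned long read_exported_kernel_end(void)
{
	struct device_node *node;
	unsigned long *valp, val = 0;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return 0;

	/* value stored by export_htab_values() in the previous kernel */
	valp = (unsigned long *)get_property(node, "linux,kernel-end", NULL);
	if (valp)
		val = *valp;

	of_node_put(node);
	return val;
}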
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
deleted file mode 100644
index 5e089deb0a2b..000000000000
--- a/arch/ppc64/kernel/misc.S
+++ /dev/null
@@ -1,940 +0,0 @@
1/*
2 * arch/ppc/kernel/misc.S
3 *
4 *
5 *
6 * This file contains miscellaneous low-level functions.
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras.
11 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
12 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/config.h>
22#include <linux/sys.h>
23#include <asm/unistd.h>
24#include <asm/errno.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/cache.h>
28#include <asm/ppc_asm.h>
29#include <asm/asm-offsets.h>
30#include <asm/cputable.h>
31#include <asm/thread_info.h>
32
33 .text
34
35/*
36 * Returns (address we were linked at) - (address we are running at)
37 * for use before the text and data are mapped to KERNELBASE.
38 */
39
40_GLOBAL(reloc_offset)
41 mflr r0
42 bl 1f
431: mflr r3
44 LOADADDR(r4,1b)
45 sub r3,r4,r3
46 mtlr r0
47 blr
48
49_GLOBAL(get_msr)
50 mfmsr r3
51 blr
52
53_GLOBAL(get_dar)
54 mfdar r3
55 blr
56
57_GLOBAL(get_srr0)
58 mfsrr0 r3
59 blr
60
61_GLOBAL(get_srr1)
62 mfsrr1 r3
63 blr
64
65_GLOBAL(get_sp)
66 mr r3,r1
67 blr
68
69#ifdef CONFIG_IRQSTACKS
70_GLOBAL(call_do_softirq)
71 mflr r0
72 std r0,16(r1)
73 stdu r1,THREAD_SIZE-112(r3)
74 mr r1,r3
75 bl .__do_softirq
76 ld r1,0(r1)
77 ld r0,16(r1)
78 mtlr r0
79 blr
80
81_GLOBAL(call___do_IRQ)
82 mflr r0
83 std r0,16(r1)
84 stdu r1,THREAD_SIZE-112(r5)
85 mr r1,r5
86 bl .__do_IRQ
87 ld r1,0(r1)
88 ld r0,16(r1)
89 mtlr r0
90 blr
91#endif /* CONFIG_IRQSTACKS */
92
93 /*
94 * To be called by C code which needs to do some operations with MMU
95 * disabled. Note that interrupts have to be disabled by the caller
96 * prior to calling us. The code called _MUST_ be in the RMO of course
97 * and part of the linear mapping as we don't attempt to translate the
98 * stack pointer at all. The function is called with the stack switched
99 * to this CPU's emergency stack.
100 *
101 * prototype is void *call_with_mmu_off(void *func, void *data);
102 *
103 * the called function is expected to be of the form
104 *
105 * void *called(void *data);
106 */
107_GLOBAL(call_with_mmu_off)
108 mflr r0 /* get link, save it on stackframe */
109 std r0,16(r1)
110 mr r5,r1 /* save old stack ptr */
111 ld r1,PACAEMERGSP(r13) /* get emerg. stack */
112 subi r1,r1,STACK_FRAME_OVERHEAD
113 std r0,16(r1) /* save link on emerg. stack */
114 std r5,0(r1) /* save old stack ptr in backchain */
115 ld r3,0(r3) /* get to real function ptr (assume same TOC) */
116 bl 2f /* we need LR to return, continue at label 2 */
117
118 ld r0,16(r1) /* we return here from the call, get LR and */
119 ld r1,0(r1) /* .. old stack ptr */
120 mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
121 mfmsr r4
122 ori r4,r4,MSR_IR|MSR_DR
123 mtspr SPRN_SRR1,r4
124 rfid
125
1262: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
127 mr r3,r4 /* get parameter */
128 mfmsr r0
129 ori r0,r0,MSR_IR|MSR_DR
130 xori r0,r0,MSR_IR|MSR_DR
131 mtspr SPRN_SRR1,r0
132 rfid
133
134
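Illustrative aside, not part of this patch: a sketch of a C caller of call_with_mmu_off(), following the prototype given in the comment above. The names rm_probe and example_call are made up; the callee must live in the RMO/linear mapping, and the caller is responsible for disabling interrupts.

#include <asm/system.h>		/* local_irq_save(), era-appropriate */

extern void *call_with_mmu_off(void *func, void *data);

/* trivial real-mode callee: just hands the cookie back */
static void *rm_probe(void *data)
{
	return data;
}

static void *example_call(void *data)
{
	unsigned long flags;
	void *ret;

	local_irq_save(flags);
	ret = call_with_mmu_off((void *)rm_probe, data);
	local_irq_restore(flags);
	return ret;
}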
135 .section ".toc","aw"
136PPC64_CACHES:
137 .tc ppc64_caches[TC],ppc64_caches
138 .section ".text"
139
140/*
141 * Write any modified data cache blocks out to memory
142 * and invalidate the corresponding instruction cache blocks.
143 *
144 * flush_icache_range(unsigned long start, unsigned long stop)
145 *
146 * flush all bytes from start through stop-1 inclusive
147 */
148
149_KPROBE(__flush_icache_range)
150
151/*
152 * Flush the data cache to memory
153 *
154 * Different systems have different cache line sizes
155 * and in some cases i-cache and d-cache line sizes differ from
156 * each other.
157 */
158 ld r10,PPC64_CACHES@toc(r2)
159 lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
160 addi r5,r7,-1
161 andc r6,r3,r5 /* round low to line bdy */
162 subf r8,r6,r4 /* compute length */
163 add r8,r8,r5 /* ensure we get enough */
164 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
165 srw. r8,r8,r9 /* compute line count */
166 beqlr /* nothing to do? */
167 mtctr r8
1681: dcbst 0,r6
169 add r6,r6,r7
170 bdnz 1b
171 sync
172
173/* Now invalidate the instruction cache */
174
175 lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
176 addi r5,r7,-1
177 andc r6,r3,r5 /* round low to line bdy */
178 subf r8,r6,r4 /* compute length */
179 add r8,r8,r5
180 lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
181 srw. r8,r8,r9 /* compute line count */
182 beqlr /* nothing to do? */
183 mtctr r8
1842: icbi 0,r6
185 add r6,r6,r7
186 bdnz 2b
187 isync
188 blr
189
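Illustrative aside, not part of this patch: the line arithmetic performed by the flush loops above, rendered in C. Round start down to a cache-line boundary, then count how many lines cover [start, stop).

static inline unsigned long cache_line_count(unsigned long start,
					     unsigned long stop,
					     unsigned long lsize,
					     unsigned long log_lsize)
{
	unsigned long base = start & ~(lsize - 1);	   /* andc: round down */
	unsigned long bytes = (stop - base) + (lsize - 1); /* "ensure we get enough" */

	return bytes >> log_lsize;			   /* srw.: line count */
}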
190 .text
191/*
192 * Like above, but only do the D-cache.
193 *
194 * flush_dcache_range(unsigned long start, unsigned long stop)
195 *
196 * flush all bytes from start to stop-1 inclusive
197 */
198_GLOBAL(flush_dcache_range)
199
200/*
201 * Flush the data cache to memory
202 *
203 * Different systems have different cache line sizes
204 */
205 ld r10,PPC64_CACHES@toc(r2)
206 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
207 addi r5,r7,-1
208 andc r6,r3,r5 /* round low to line bdy */
209 subf r8,r6,r4 /* compute length */
210 add r8,r8,r5 /* ensure we get enough */
211 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
212 srw. r8,r8,r9 /* compute line count */
213 beqlr /* nothing to do? */
214 mtctr r8
2150: dcbst 0,r6
216 add r6,r6,r7
217 bdnz 0b
218 sync
219 blr
220
221/*
222 * Like above, but works on non-mapped physical addresses.
223 * Use only for non-LPAR setups! It also assumes real mode
224 * is cacheable. Used for flushing out the DART before using
225 * it as uncacheable memory
226 *
227 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
228 *
229 * flush all bytes from start to stop-1 inclusive
230 */
231_GLOBAL(flush_dcache_phys_range)
232 ld r10,PPC64_CACHES@toc(r2)
233 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
234 addi r5,r7,-1
235 andc r6,r3,r5 /* round low to line bdy */
236 subf r8,r6,r4 /* compute length */
237 add r8,r8,r5 /* ensure we get enough */
238 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
239 srw. r8,r8,r9 /* compute line count */
240 beqlr /* nothing to do? */
241 mfmsr r5 /* Disable MMU Data Relocation */
242 ori r0,r5,MSR_DR
243 xori r0,r0,MSR_DR
244 sync
245 mtmsr r0
246 sync
247 isync
248 mtctr r8
2490: dcbst 0,r6
250 add r6,r6,r7
251 bdnz 0b
252 sync
253 isync
254 mtmsr r5 /* Re-enable MMU Data Relocation */
255 sync
256 isync
257 blr
258
259_GLOBAL(flush_inval_dcache_range)
260 ld r10,PPC64_CACHES@toc(r2)
261 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
262 addi r5,r7,-1
263 andc r6,r3,r5 /* round low to line bdy */
264 subf r8,r6,r4 /* compute length */
265 add r8,r8,r5 /* ensure we get enough */
266 lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
267 srw. r8,r8,r9 /* compute line count */
268 beqlr /* nothing to do? */
269 sync
270 isync
271 mtctr r8
2720: dcbf 0,r6
273 add r6,r6,r7
274 bdnz 0b
275 sync
276 isync
277 blr
278
279
280/*
281 * Flush a particular page from the data cache to RAM.
282 * Note: this is necessary because the instruction cache does *not*
283 * snoop from the data cache.
284 *
285 * void __flush_dcache_icache(void *page)
286 */
287_GLOBAL(__flush_dcache_icache)
288/*
289 * Flush the data cache to memory
290 *
291 * Different systems have different cache line sizes
292 */
293
294/* Flush the dcache */
295 ld r7,PPC64_CACHES@toc(r2)
296 clrrdi r3,r3,PAGE_SHIFT /* Page align */
297 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
298 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
299 mr r6,r3
300 mtctr r4
3010: dcbst 0,r6
302 add r6,r6,r5
303 bdnz 0b
304 sync
305
306/* Now invalidate the icache */
307
308 lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
309 lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
310 mtctr r4
3111: icbi 0,r3
312 add r3,r3,r5
313 bdnz 1b
314 isync
315 blr
316
317/*
318 * I/O string operations
319 *
320 * insb(port, buf, len)
321 * outsb(port, buf, len)
322 * insw(port, buf, len)
323 * outsw(port, buf, len)
324 * insl(port, buf, len)
325 * outsl(port, buf, len)
326 * insw_ns(port, buf, len)
327 * outsw_ns(port, buf, len)
328 * insl_ns(port, buf, len)
329 * outsl_ns(port, buf, len)
330 *
331 * The *_ns versions don't do byte-swapping.
332 */
333_GLOBAL(_insb)
334 cmpwi 0,r5,0
335 mtctr r5
336 subi r4,r4,1
337 blelr-
33800: lbz r5,0(r3)
339 eieio
340 stbu r5,1(r4)
341 bdnz 00b
342 twi 0,r5,0
343 isync
344 blr
345
346_GLOBAL(_outsb)
347 cmpwi 0,r5,0
348 mtctr r5
349 subi r4,r4,1
350 blelr-
35100: lbzu r5,1(r4)
352 stb r5,0(r3)
353 bdnz 00b
354 sync
355 blr
356
357_GLOBAL(_insw)
358 cmpwi 0,r5,0
359 mtctr r5
360 subi r4,r4,2
361 blelr-
36200: lhbrx r5,0,r3
363 eieio
364 sthu r5,2(r4)
365 bdnz 00b
366 twi 0,r5,0
367 isync
368 blr
369
370_GLOBAL(_outsw)
371 cmpwi 0,r5,0
372 mtctr r5
373 subi r4,r4,2
374 blelr-
37500: lhzu r5,2(r4)
376 sthbrx r5,0,r3
377 bdnz 00b
378 sync
379 blr
380
381_GLOBAL(_insl)
382 cmpwi 0,r5,0
383 mtctr r5
384 subi r4,r4,4
385 blelr-
38600: lwbrx r5,0,r3
387 eieio
388 stwu r5,4(r4)
389 bdnz 00b
390 twi 0,r5,0
391 isync
392 blr
393
394_GLOBAL(_outsl)
395 cmpwi 0,r5,0
396 mtctr r5
397 subi r4,r4,4
398 blelr-
39900: lwzu r5,4(r4)
400 stwbrx r5,0,r3
401 bdnz 00b
402 sync
403 blr
404
405/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
406_GLOBAL(_insw_ns)
407 cmpwi 0,r5,0
408 mtctr r5
409 subi r4,r4,2
410 blelr-
41100: lhz r5,0(r3)
412 eieio
413 sthu r5,2(r4)
414 bdnz 00b
415 twi 0,r5,0
416 isync
417 blr
418
419/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
420_GLOBAL(_outsw_ns)
421 cmpwi 0,r5,0
422 mtctr r5
423 subi r4,r4,2
424 blelr-
42500: lhzu r5,2(r4)
426 sth r5,0(r3)
427 bdnz 00b
428 sync
429 blr
430
431_GLOBAL(_insl_ns)
432 cmpwi 0,r5,0
433 mtctr r5
434 subi r4,r4,4
435 blelr-
43600: lwz r5,0(r3)
437 eieio
438 stwu r5,4(r4)
439 bdnz 00b
440 twi 0,r5,0
441 isync
442 blr
443
444_GLOBAL(_outsl_ns)
445 cmpwi 0,r5,0
446 mtctr r5
447 subi r4,r4,4
448 blelr-
44900: lwzu r5,4(r4)
450 stw r5,0(r3)
451 bdnz 00b
452 sync
453 blr
454
455/*
456 * identify_cpu: identifies the CPU and calls setup_cpu
457 * In: r3 = base of the cpu_specs array
458 * r4 = address of cur_cpu_spec
459 * r5 = relocation offset
460 */
461_GLOBAL(identify_cpu)
462 mfpvr r7
4631:
464 lwz r8,CPU_SPEC_PVR_MASK(r3)
465 and r8,r8,r7
466 lwz r9,CPU_SPEC_PVR_VALUE(r3)
467 cmplw 0,r9,r8
468 beq 1f
469 addi r3,r3,CPU_SPEC_ENTRY_SIZE
470 b 1b
4711:
472 add r0,r3,r5
473 std r0,0(r4)
474 ld r4,CPU_SPEC_SETUP(r3)
475 sub r4,r4,r5
476 ld r4,0(r4)
477 sub r4,r4,r5
478 mtctr r4
479 /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
480 mr r4,r3
481 mr r3,r5
482 bctr
483
484/*
485 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
486 * and writes nops over sections of code that don't apply to this cpu.
487 * r3 = data offset (not changed)
488 */
489_GLOBAL(do_cpu_ftr_fixups)
490 /* Get CPU 0 features */
491 LOADADDR(r6,cur_cpu_spec)
492 sub r6,r6,r3
493 ld r4,0(r6)
494 sub r4,r4,r3
495 ld r4,CPU_SPEC_FEATURES(r4)
496 /* Get the fixup table */
497 LOADADDR(r6,__start___ftr_fixup)
498 sub r6,r6,r3
499 LOADADDR(r7,__stop___ftr_fixup)
500 sub r7,r7,r3
501 /* Do the fixup */
5021: cmpld r6,r7
503 bgelr
504 addi r6,r6,32
505 ld r8,-32(r6) /* mask */
506 and r8,r8,r4
507 ld r9,-24(r6) /* value */
508 cmpld r8,r9
509 beq 1b
510 ld r8,-16(r6) /* section begin */
511 ld r9,-8(r6) /* section end */
512 subf. r9,r8,r9
513 beq 1b
514 /* write nops over the section of code */
515 /* todo: if large section, add a branch at the start of it */
516 srwi r9,r9,2
517 mtctr r9
518 sub r8,r8,r3
519 lis r0,0x60000000@h /* nop */
5203: stw r0,0(r8)
521 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
522 beq 2f
523 dcbst 0,r8 /* suboptimal, but simpler */
524 sync
525 icbi 0,r8
5262: addi r8,r8,4
527 bdnz 3b
528 sync /* additional sync needed on g4 */
529 isync
530 b 1b
531
532#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
533/*
534 * Do an IO access in real mode
535 */
536_GLOBAL(real_readb)
537 mfmsr r7
538 ori r0,r7,MSR_DR
539 xori r0,r0,MSR_DR
540 sync
541 mtmsrd r0
542 sync
543 isync
544 mfspr r6,SPRN_HID4
545 rldicl r5,r6,32,0
546 ori r5,r5,0x100
547 rldicl r5,r5,32,0
548 sync
549 mtspr SPRN_HID4,r5
550 isync
551 slbia
552 isync
553 lbz r3,0(r3)
554 sync
555 mtspr SPRN_HID4,r6
556 isync
557 slbia
558 isync
559 mtmsrd r7
560 sync
561 isync
562 blr
563
564/*
565 * Do an IO access in real mode
566 */
567_GLOBAL(real_writeb)
568 mfmsr r7
569 ori r0,r7,MSR_DR
570 xori r0,r0,MSR_DR
571 sync
572 mtmsrd r0
573 sync
574 isync
575 mfspr r6,SPRN_HID4
576 rldicl r5,r6,32,0
577 ori r5,r5,0x100
578 rldicl r5,r5,32,0
579 sync
580 mtspr SPRN_HID4,r5
581 isync
582 slbia
583 isync
584 stb r3,0(r4)
585 sync
586 mtspr SPRN_HID4,r6
587 isync
588 slbia
589 isync
590 mtmsrd r7
591 sync
592 isync
593 blr
594#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
595
596/*
597 * SCOM access functions for 970 (FX only for now)
598 *
599 * unsigned long scom970_read(unsigned int address);
600 * void scom970_write(unsigned int address, unsigned long value);
601 *
602 * The address passed in is the 24-bit register address. This code
603 * is 970-specific and will not check the status bits, so you should
604 * know what you are doing.
605 */
606_GLOBAL(scom970_read)
607 /* interrupts off */
608 mfmsr r4
609 ori r0,r4,MSR_EE
610 xori r0,r0,MSR_EE
611 mtmsrd r0,1
612
613 /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
614 * (including parity). On current CPUs they must be 0'd,
615 * and finally OR in the RW bit
616 */
617 rlwinm r3,r3,8,0,15
618 ori r3,r3,0x8000
619
620 /* do the actual scom read */
621 sync
622 mtspr SPRN_SCOMC,r3
623 isync
624 mfspr r3,SPRN_SCOMD
625 isync
626 mfspr r0,SPRN_SCOMC
627 isync
628
629 /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
630 * that's the best we can do). Not implemented yet as we don't use
631 * the scom on any of the bogus CPUs yet, but may have to be done
632 * ultimately
633 */
634
635 /* restore interrupts */
636 mtmsrd r4,1
637 blr
638
639
640_GLOBAL(scom970_write)
641 /* interrupts off */
642 mfmsr r5
643 ori r0,r5,MSR_EE
644 xori r0,r0,MSR_EE
645 mtmsrd r0,1
646
647 /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
648 * (including parity). On current CPUs they must be 0'd.
649 */
650
651 rlwinm r3,r3,8,0,15
652
653 sync
654 mtspr SPRN_SCOMD,r4 /* write data */
655 isync
656 mtspr SPRN_SCOMC,r3 /* write command */
657 isync
658 mfspr r3,SPRN_SCOMC
659 isync
660
661 /* restore interrupts */
662 mtmsrd r5,1
663 blr
664
665
666/*
667 * Create a kernel thread
668 * kernel_thread(fn, arg, flags)
669 */
670_GLOBAL(kernel_thread)
671 std r29,-24(r1)
672 std r30,-16(r1)
673 stdu r1,-STACK_FRAME_OVERHEAD(r1)
674 mr r29,r3
675 mr r30,r4
676 ori r3,r5,CLONE_VM /* flags */
677 oris r3,r3,(CLONE_UNTRACED>>16)
678 li r4,0 /* new sp (unused) */
679 li r0,__NR_clone
680 sc
681 cmpdi 0,r3,0 /* parent or child? */
682 bne 1f /* return if parent */
683 li r0,0
684 stdu r0,-STACK_FRAME_OVERHEAD(r1)
685 ld r2,8(r29)
686 ld r29,0(r29)
687 mtlr r29 /* fn addr in lr */
688 mr r3,r30 /* load arg and call fn */
689 blrl
690 li r0,__NR_exit /* exit after child exits */
691 li r3,0
692 sc
6931: addi r1,r1,STACK_FRAME_OVERHEAD
694 ld r29,-24(r1)
695 ld r30,-16(r1)
696 blr
697
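Illustrative aside, not part of this patch: typical 2.6-era use of the kernel_thread() helper above. worker() and spawn_example() are made-up names.

#include <linux/kernel.h>
#include <linux/sched.h>	/* CLONE_* flags */

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* runs in the child; returning exits via the __NR_exit path above */
static int worker(void *arg)
{
	return 0;
}

static void spawn_example(void)
{
	int pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk(KERN_ERR "kernel_thread failed: %d\n", pid);
}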
698/*
699 * disable_kernel_fp()
700 * Disable the FPU.
701 */
702_GLOBAL(disable_kernel_fp)
703 mfmsr r3
704 rldicl r0,r3,(63-MSR_FP_LG),1
705 rldicl r3,r0,(MSR_FP_LG+1),0
706 mtmsrd r3 /* disable use of fpu now */
707 isync
708 blr
709
710#ifdef CONFIG_ALTIVEC
711
712#if 0 /* this has no callers for now */
713/*
714 * disable_kernel_altivec()
715 * Disable the VMX.
716 */
717_GLOBAL(disable_kernel_altivec)
718 mfmsr r3
719 rldicl r0,r3,(63-MSR_VEC_LG),1
720 rldicl r3,r0,(MSR_VEC_LG+1),0
721 mtmsrd r3 /* disable use of VMX now */
722 isync
723 blr
724#endif /* 0 */
725
726/*
727 * giveup_altivec(tsk)
728 * Disable VMX for the task given as the argument,
729 * and save the vector registers in its thread_struct.
730 * Enables the VMX for use in the kernel on return.
731 */
732_GLOBAL(giveup_altivec)
733 mfmsr r5
734 oris r5,r5,MSR_VEC@h
735 mtmsrd r5 /* enable use of VMX now */
736 isync
737 cmpdi 0,r3,0
738 beqlr- /* if no previous owner, done */
739 addi r3,r3,THREAD /* want THREAD of task */
740 ld r5,PT_REGS(r3)
741 cmpdi 0,r5,0
742 SAVE_32VRS(0,r4,r3)
743 mfvscr vr0
744 li r4,THREAD_VSCR
745 stvx vr0,r4,r3
746 beq 1f
747 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
748 lis r3,MSR_VEC@h
749 andc r4,r4,r3 /* disable VMX for previous task */
750 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7511:
752#ifndef CONFIG_SMP
753 li r5,0
754 ld r4,last_task_used_altivec@got(r2)
755 std r5,0(r4)
756#endif /* CONFIG_SMP */
757 blr
758
759#endif /* CONFIG_ALTIVEC */
760
761_GLOBAL(__setup_cpu_power3)
762 blr
763
764_GLOBAL(execve)
765 li r0,__NR_execve
766 sc
767 bnslr
768 neg r3,r3
769 blr
770
771/* kexec_wait(phys_cpu)
772 *
773 * wait for the flag to change, indicating this kernel is going away but
774 * the slave code for the next one is at addresses 0 to 0x100.
775 *
776 * This is used by all slaves.
777 *
778 * Physical (hardware) cpu id should be in r3.
779 */
780_GLOBAL(kexec_wait)
781 bl 1f
7821: mflr r5
783 addi r5,r5,kexec_flag-1b
784
78599: HMT_LOW
786#ifdef CONFIG_KEXEC /* use no memory without kexec */
787 lwz r4,0(r5)
788 cmpwi 0,r4,0
789 bnea 0x60
790#endif
791 b 99b
792
793/* this can be in text because we won't change it until we are
794 * running in real mode anyway
795 */
796kexec_flag:
797 .long 0
798
799
800#ifdef CONFIG_KEXEC
801
802/* kexec_smp_wait(void)
803 *
804 * call with interrupts off
805 * note: this is a terminal routine, it does not save lr
806 *
807 * get phys id from paca
808 * set paca id to -1 to say we got here
809 * switch to real mode
810 * join other cpus in kexec_wait(phys_id)
811 */
812_GLOBAL(kexec_smp_wait)
813 lhz r3,PACAHWCPUID(r13)
814 li r4,-1
815 sth r4,PACAHWCPUID(r13) /* let others know we left */
816 bl real_mode
817 b .kexec_wait
818
819/*
820 * switch to real mode (turn mmu off)
821 * we use the early kernel trick that the hardware ignores bits
822 * 0 and 1 (big endian) of the effective address in real mode
823 *
824 * don't overwrite r3 here, it is live for kexec_wait above.
825 */
826real_mode: /* assume normal blr return */
8271: li r9,MSR_RI
828 li r10,MSR_DR|MSR_IR
829 mflr r11 /* return address to SRR0 */
830 mfmsr r12
831 andc r9,r12,r9
832 andc r10,r12,r10
833
834 mtmsrd r9,1
835 mtspr SPRN_SRR1,r10
836 mtspr SPRN_SRR0,r11
837 rfid
838
839
840/*
841 * kexec_sequence(newstack, start, image, control, clear_all())
842 *
843 * does the grungy work with stack switching and real mode switches;
844 * also does simple calls to other code
845 */
846
847_GLOBAL(kexec_sequence)
848 mflr r0
849 std r0,16(r1)
850
851 /* switch stacks to newstack -- &kexec_stack.stack */
852 stdu r1,THREAD_SIZE-112(r3)
853 mr r1,r3
854
855 li r0,0
856 std r0,16(r1)
857
858 /* save regs for local vars on new stack.
859 * yes, we won't go back, but ...
860 */
861 std r31,-8(r1)
862 std r30,-16(r1)
863 std r29,-24(r1)
864 std r28,-32(r1)
865 std r27,-40(r1)
866 std r26,-48(r1)
867 std r25,-56(r1)
868
869 stdu r1,-112-64(r1)
870
871 /* save args into preserved regs */
872 mr r31,r3 /* newstack (both) */
873 mr r30,r4 /* start (real) */
874 mr r29,r5 /* image (virt) */
875 mr r28,r6 /* control, unused */
876 mr r27,r7 /* clear_all() fn desc */
877 mr r26,r8 /* spare */
878 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
879
880 /* disable interrupts, we are overwriting kernel data next */
881 mfmsr r3
882 rlwinm r3,r3,0,17,15
883 mtmsrd r3,1
884
885 /* copy dest pages, flush whole dest image */
886 mr r3,r29
887 bl .kexec_copy_flush /* (image) */
888
889 /* turn off mmu */
890 bl real_mode
891
892 /* clear out hardware hash page table and tlb */
893 ld r5,0(r27) /* deref function descriptor */
894 mtctr r5
895 bctrl /* ppc_md.hpte_clear_all(void); */
896
897/*
898 * kexec image calling is:
899 * the first 0x100 bytes of the entry point are copied to 0
900 *
901 * all slaves branch to slave = 0x60 (absolute)
902 * slave(phys_cpu_id);
903 *
904 * master goes to start = entry point
905 * start(phys_cpu_id, start, 0);
906 *
907 *
908 * a wrapper is needed to call existing kernels, here is an approximate
909 * description of one method:
910 *
911 * v2: (2.6.10)
912 * start will be near the boot_block (maybe 0x100 bytes before it?)
913 * it will have a 0x60, which will b to boot_block, where it will wait
914 * and 0 will store phys into struct boot-block and load r3 from there,
915 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
916 *
917 * v1: (2.6.9)
918 * boot block will have all cpus scanning device tree to see if they
919 * are the boot cpu ?????
920 * other device tree differences (prop sizes, va vs pa, etc)...
921 */
922
923 /* copy 0x100 bytes starting at start to 0 */
924 li r3,0
925 mr r4,r30
926 li r5,0x100
927 li r6,0
928 bl .copy_and_flush /* (dest, src, copy limit, start offset) */
9291: /* assume normal blr return */
930
931 /* release other cpus to the new kernel secondary start at 0x60 */
932 mflr r5
933 li r6,1
934 stw r6,kexec_flag-1b(r5)
935 mr r3,r25 # my phys cpu
936 mr r4,r30 # start, aka phys mem offset
937 mtlr r4
938 li r5,0
939 blr /* image->start(physid, image->start, 0); */
940#endif /* CONFIG_KEXEC */
diff --git a/arch/ppc64/kernel/module.c b/arch/ppc64/kernel/module.c
deleted file mode 100644
index 928b8581fcb0..000000000000
--- a/arch/ppc64/kernel/module.c
+++ /dev/null
@@ -1,455 +0,0 @@
1/* Kernel module help for PPC64.
2 Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/
18#include <linux/module.h>
19#include <linux/elf.h>
20#include <linux/moduleloader.h>
21#include <linux/err.h>
22#include <linux/vmalloc.h>
23#include <asm/module.h>
24#include <asm/uaccess.h>
25
26/* FIXME: We don't do .init separately. To do this, we'd need to have
27 a separate r2 value in the init and core section, and stub between
28 them, too.
29
30 Using a magic allocator which places modules within 32MB solves
31 this, and makes other things simpler. Anton?
32 --RR. */
33#if 0
34#define DEBUGP printk
35#else
36#define DEBUGP(fmt , ...)
37#endif
38
39/* There's actually a third entry here, but it's unused */
40struct ppc64_opd_entry
41{
42 unsigned long funcaddr;
43 unsigned long r2;
44};
45
46/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
47 the kernel itself). But on PPC64, these need to be used for every
48 jump, actually, to reset r2 (TOC+0x8000). */
49struct ppc64_stub_entry
50{
51 /* 28 byte jump instruction sequence (7 instructions) */
52 unsigned char jump[28];
53 unsigned char unused[4];
54 /* Data for the above code */
55 struct ppc64_opd_entry opd;
56};
57
58/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external)
59 function which may be more than 24-bits away. We could simply
60 patch the new r2 value and function pointer into the stub, but it's
61 significantly shorter to put these values at the end of the stub
62 code, and patch the stub address (32-bits relative to the TOC ptr,
63 r2) into the stub. */
64static struct ppc64_stub_entry ppc64_stub =
65{ .jump = {
66 0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, <high> */
67 0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, <low> */
68 /* Save current r2 value in magic place on the stack. */
69 0xf8, 0x41, 0x00, 0x28, /* std r2,40(r1) */
70 0xe9, 0x6c, 0x00, 0x20, /* ld r11,32(r12) */
71 0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */
72 0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */
73 0x4e, 0x80, 0x04, 0x20 /* bctr */
74} };
75
76/* Count how many different 24-bit relocations there are (different
77 symbol, different addend). */
78static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
79{
80 unsigned int i, j, ret = 0;
81
82 /* FIXME: Only count external ones --RR */
83 /* Sure, this is order(n^2), but it's usually short, and not
84 time critical */
85 for (i = 0; i < num; i++) {
86 /* Only count 24-bit relocs, others don't need stubs */
87 if (ELF64_R_TYPE(rela[i].r_info) != R_PPC_REL24)
88 continue;
89 for (j = 0; j < i; j++) {
90 /* If this addend appeared before, it's
91 already been counted */
92 if (rela[i].r_info == rela[j].r_info
93 && rela[i].r_addend == rela[j].r_addend)
94 break;
95 }
96 if (j == i) ret++;
97 }
98 return ret;
99}
100
101void *module_alloc(unsigned long size)
102{
103 if (size == 0)
104 return NULL;
105
106 return vmalloc_exec(size);
107}
108
109/* Free memory returned from module_alloc */
110void module_free(struct module *mod, void *module_region)
111{
112 vfree(module_region);
113 /* FIXME: If module_region == mod->init_region, trim exception
114 table entries. */
115}
116
117/* Get size of potential trampolines required. */
118static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
119 const Elf64_Shdr *sechdrs)
120{
121 /* One extra reloc so it's always 0-funcaddr terminated */
122 unsigned long relocs = 1;
123 unsigned i;
124
125 /* Every relocated section... */
126 for (i = 1; i < hdr->e_shnum; i++) {
127 if (sechdrs[i].sh_type == SHT_RELA) {
128 DEBUGP("Found relocations in section %u\n", i);
129 DEBUGP("Ptr: %p. Number: %lu\n",
130 (void *)sechdrs[i].sh_addr,
131 sechdrs[i].sh_size / sizeof(Elf64_Rela));
132 relocs += count_relocs((void *)sechdrs[i].sh_addr,
133 sechdrs[i].sh_size
134 / sizeof(Elf64_Rela));
135 }
136 }
137
138 DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
139 return relocs * sizeof(struct ppc64_stub_entry);
140}
141
142static void dedotify_versions(struct modversion_info *vers,
143 unsigned long size)
144{
145 struct modversion_info *end;
146
147 for (end = (void *)vers + size; vers < end; vers++)
148 if (vers->name[0] == '.')
149 memmove(vers->name, vers->name+1, strlen(vers->name));
150}
151
152/* Undefined symbols which refer to .funcname, hack to funcname */
153static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
154{
155 unsigned int i;
156
157 for (i = 1; i < numsyms; i++) {
158 if (syms[i].st_shndx == SHN_UNDEF) {
159 char *name = strtab + syms[i].st_name;
160 if (name[0] == '.')
161 memmove(name, name+1, strlen(name));
162 }
163 }
164}
165
166int module_frob_arch_sections(Elf64_Ehdr *hdr,
167 Elf64_Shdr *sechdrs,
168 char *secstrings,
169 struct module *me)
170{
171 unsigned int i;
172
173 /* Find .toc and .stubs sections, symtab and strtab */
174 for (i = 1; i < hdr->e_shnum; i++) {
175 char *p;
176 if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
177 me->arch.stubs_section = i;
178 else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
179 me->arch.toc_section = i;
180 else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
181 dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
182 sechdrs[i].sh_size);
183
184 /* We don't handle .init for the moment: rename to _init */
185 while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
186 p[0] = '_';
187
188 if (sechdrs[i].sh_type == SHT_SYMTAB)
189 dedotify((void *)hdr + sechdrs[i].sh_offset,
190 sechdrs[i].sh_size / sizeof(Elf64_Sym),
191 (void *)hdr
192 + sechdrs[sechdrs[i].sh_link].sh_offset);
193 }
194 if (!me->arch.stubs_section || !me->arch.toc_section) {
195 printk("%s: doesn't contain .toc or .stubs.\n", me->name);
196 return -ENOEXEC;
197 }
198
199 /* Override the stubs size */
200 sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
201 return 0;
202}
203
204int apply_relocate(Elf64_Shdr *sechdrs,
205 const char *strtab,
206 unsigned int symindex,
207 unsigned int relsec,
208 struct module *me)
209{
210 printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
211 return -ENOEXEC;
212}
213
214/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
215 gives the value the maximum span in an instruction which uses a signed
216 offset) */
217static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
218{
219 return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
220}
221
222/* Both low and high 16 bits are added as SIGNED additions, so if the low
223 16 bits have the high bit set, the high 16 bits must be adjusted. These
224 macros do that (stolen from binutils). */
225#define PPC_LO(v) ((v) & 0xffff)
226#define PPC_HI(v) (((v) >> 16) & 0xffff)
227#define PPC_HA(v) PPC_HI ((v) + 0x8000)
228
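Illustrative aside, not part of this patch: a worked check of the HA adjustment. The addi in the stub sign-extends its low 16 bits, so when the low half's sign bit is set, PPC_HA pre-biases the high half by one; reassembling HA:LO then recovers the original offset. This is plain userspace C for clarity.

#include <assert.h>
#include <stdint.h>

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)

int main(void)
{
	int32_t v = 0x1233d678;		/* low half 0xd678 has its sign bit set */

	/* what "addis r12,r2,HA(v); addi r12,r12,LO(v)" computes: */
	int32_t r = ((int32_t)PPC_HA(v) << 16) + (int16_t)PPC_LO(v);

	assert(r == v);	/* with plain PPC_HI this would be off by 0x10000 */
	return 0;
}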
229/* Patch stub to reference function and correct r2 value. */
230static inline int create_stub(Elf64_Shdr *sechdrs,
231 struct ppc64_stub_entry *entry,
232 struct ppc64_opd_entry *opd,
233 struct module *me)
234{
235 Elf64_Half *loc1, *loc2;
236 long reladdr;
237
238 *entry = ppc64_stub;
239
240 loc1 = (Elf64_Half *)&entry->jump[2];
241 loc2 = (Elf64_Half *)&entry->jump[6];
242
243 /* Stub uses address relative to r2. */
244 reladdr = (unsigned long)entry - my_r2(sechdrs, me);
245 if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
246 printk("%s: Address %p of stub out of range of %p.\n",
247 me->name, (void *)reladdr, (void *)my_r2(sechdrs, me));
248 return 0;
249 }
250 DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);
251
252 *loc1 = PPC_HA(reladdr);
253 *loc2 = PPC_LO(reladdr);
254 entry->opd.funcaddr = opd->funcaddr;
255 entry->opd.r2 = opd->r2;
256 return 1;
257}
258
259/* Create stub to jump to function described in this OPD: we need the
260 stub to set up the TOC ptr (r2) for the function. */
261static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
262 unsigned long opdaddr,
263 struct module *me)
264{
265 struct ppc64_stub_entry *stubs;
266 struct ppc64_opd_entry *opd = (void *)opdaddr;
267 unsigned int i, num_stubs;
268
269 num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
270
271 /* Find this stub, or if that fails, the next avail. entry */
272 stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
273 for (i = 0; stubs[i].opd.funcaddr; i++) {
274 BUG_ON(i >= num_stubs);
275
276 if (stubs[i].opd.funcaddr == opd->funcaddr)
277 return (unsigned long)&stubs[i];
278 }
279
280 if (!create_stub(sechdrs, &stubs[i], opd, me))
281 return 0;
282
283 return (unsigned long)&stubs[i];
284}
285
286/* We expect a noop next: if it is, replace it with an instruction to
287 restore r2. */
288static int restore_r2(u32 *instruction, struct module *me)
289{
290 if (*instruction != 0x60000000) {
291 printk("%s: Expect noop after relocate, got %08x\n",
292 me->name, *instruction);
293 return 0;
294 }
295 *instruction = 0xe8410028; /* ld r2,40(r1) */
296 return 1;
297}
298
299int apply_relocate_add(Elf64_Shdr *sechdrs,
300 const char *strtab,
301 unsigned int symindex,
302 unsigned int relsec,
303 struct module *me)
304{
305 unsigned int i;
306 Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
307 Elf64_Sym *sym;
308 unsigned long *location;
309 unsigned long value;
310
311 DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
312 sechdrs[relsec].sh_info);
313 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
314 /* This is where to make the change */
315 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
316 + rela[i].r_offset;
317 /* This is the symbol it is referring to */
318 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
319 + ELF64_R_SYM(rela[i].r_info);
320
321 DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
322 location, (long)ELF64_R_TYPE(rela[i].r_info),
323 strtab + sym->st_name, (unsigned long)sym->st_value,
324 (long)rela[i].r_addend);
325
326 /* `Everything is relative'. */
327 value = sym->st_value + rela[i].r_addend;
328
329 switch (ELF64_R_TYPE(rela[i].r_info)) {
330 case R_PPC64_ADDR32:
331 /* Simply set it */
332 *(u32 *)location = value;
333 break;
334
335 case R_PPC64_ADDR64:
336 /* Simply set it */
337 *(unsigned long *)location = value;
338 break;
339
340 case R_PPC64_TOC:
341 *(unsigned long *)location = my_r2(sechdrs, me);
342 break;
343
344 case R_PPC64_TOC16:
345 /* Subtract TOC pointer */
346 value -= my_r2(sechdrs, me);
347 if (value + 0x8000 > 0xffff) {
348 printk("%s: bad TOC16 relocation (%lu)\n",
349 me->name, value);
350 return -ENOEXEC;
351 }
352 *((uint16_t *) location)
353 = (*((uint16_t *) location) & ~0xffff)
354 | (value & 0xffff);
355 break;
356
357 case R_PPC64_TOC16_DS:
358 /* Subtract TOC pointer */
359 value -= my_r2(sechdrs, me);
360 if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
361 printk("%s: bad TOC16_DS relocation (%lu)\n",
362 me->name, value);
363 return -ENOEXEC;
364 }
365 *((uint16_t *) location)
366 = (*((uint16_t *) location) & ~0xfffc)
367 | (value & 0xfffc);
368 break;
369
370 case R_PPC_REL24:
371 /* FIXME: Handle weak symbols here --RR */
372 if (sym->st_shndx == SHN_UNDEF) {
373 /* External: go via stub */
374 value = stub_for_addr(sechdrs, value, me);
375 if (!value)
376 return -ENOENT;
377 if (!restore_r2((u32 *)location + 1, me))
378 return -ENOEXEC;
379 }
380
381 /* Convert value to relative */
382 value -= (unsigned long)location;
383 if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
384 printk("%s: REL24 %li out of range!\n",
385 me->name, (long int)value);
386 return -ENOEXEC;
387 }
388
389 /* Only replace bits 2 through 26 */
390 *(uint32_t *)location
391 = (*(uint32_t *)location & ~0x03fffffc)
392 | (value & 0x03fffffc);
393 break;
394
395 default:
396 printk("%s: Unknown ADD relocation: %lu\n",
397 me->name,
398 (unsigned long)ELF64_R_TYPE(rela[i].r_info));
399 return -ENOEXEC;
400 }
401 }
402
403 return 0;
404}
405
406LIST_HEAD(module_bug_list);
407
408int module_finalize(const Elf_Ehdr *hdr,
409 const Elf_Shdr *sechdrs, struct module *me)
410{
411 char *secstrings;
412 unsigned int i;
413
414 me->arch.bug_table = NULL;
415 me->arch.num_bugs = 0;
416
417 /* Find the __bug_table section, if present */
418 secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
419 for (i = 1; i < hdr->e_shnum; i++) {
420 if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
421 continue;
422 me->arch.bug_table = (void *) sechdrs[i].sh_addr;
423 me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
424 break;
425 }
426
427 /*
428 * Strictly speaking this should have a spinlock to protect against
429 * traversals, but since we only traverse on BUG()s, a spinlock
430 * could potentially lead to deadlock and thus be counter-productive.
431 */
432 list_add(&me->arch.bug_list, &module_bug_list);
433
434 return 0;
435}
436
437void module_arch_cleanup(struct module *mod)
438{
439 list_del(&mod->arch.bug_list);
440}
441
442struct bug_entry *module_find_bug(unsigned long bugaddr)
443{
444 struct mod_arch_specific *mod;
445 unsigned int i;
446 struct bug_entry *bug;
447
448 list_for_each_entry(mod, &module_bug_list, bug_list) {
449 bug = mod->bug_table;
450 for (i = 0; i < mod->num_bugs; ++i, ++bug)
451 if (bugaddr == bug->bug_addr)
452 return bug;
453 }
454 return NULL;
455}
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
deleted file mode 100644
index 3cef1b8f57f0..000000000000
--- a/arch/ppc64/kernel/pci.c
+++ /dev/null
@@ -1,1319 +0,0 @@
1/*
2 * Port for PPC64 by David Engebretsen, IBM Corp.
3 * Contains common pci routines for the ppc64 platform, pSeries and iSeries brands.
4 *
5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
6 * Rework, based on alpha PCI code.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#undef DEBUG
15
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/pci.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/bootmem.h>
22#include <linux/mm.h>
23#include <linux/list.h>
24#include <linux/syscalls.h>
25
26#include <asm/processor.h>
27#include <asm/io.h>
28#include <asm/prom.h>
29#include <asm/pci-bridge.h>
30#include <asm/byteorder.h>
31#include <asm/irq.h>
32#include <asm/machdep.h>
33#include <asm/udbg.h>
34#include <asm/ppc-pci.h>
35
36#ifdef DEBUG
37#define DBG(fmt...) udbg_printf(fmt)
38#else
39#define DBG(fmt...)
40#endif
41
42unsigned long pci_probe_only = 1;
43unsigned long pci_assign_all_buses = 0;
44
45/*
46 * legal IO pages under MAX_ISA_PORT. This is to ensure we don't touch
47 * devices we don't have access to.
48 */
49unsigned long io_page_mask;
50
51EXPORT_SYMBOL(io_page_mask);
52
53#ifdef CONFIG_PPC_MULTIPLATFORM
54static void fixup_resource(struct resource *res, struct pci_dev *dev);
55static void do_bus_setup(struct pci_bus *bus);
56#endif
57
58unsigned int pcibios_assign_all_busses(void)
59{
60 return pci_assign_all_buses;
61}
62
63/* pci_io_base -- the base address from which io bars are offset.
64 * This is the lowest I/O base address (so bar values are always positive),
65 * and it *must* be the start of ISA space if an ISA bus exists because
66 * ISA drivers use hard coded offsets. If no ISA bus exists a dummy
67 * page is mapped and isa_io_limit prevents access to it.
68 */
69unsigned long isa_io_base; /* NULL if no ISA bus */
70EXPORT_SYMBOL(isa_io_base);
71unsigned long pci_io_base;
72EXPORT_SYMBOL(pci_io_base);
73
74void iSeries_pcibios_init(void);
75
76LIST_HEAD(hose_list);
77
78struct dma_mapping_ops pci_dma_ops;
79EXPORT_SYMBOL(pci_dma_ops);
80
81int global_phb_number; /* Global phb counter */
82
83/* Cached ISA bridge dev. */
84struct pci_dev *ppc64_isabridge_dev = NULL;
85
86static void fixup_broken_pcnet32(struct pci_dev* dev)
87{
88 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
89 dev->vendor = PCI_VENDOR_ID_AMD;
90 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
91 }
92}
93DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
94
95void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
96 struct resource *res)
97{
98 unsigned long offset = 0;
99 struct pci_controller *hose = pci_bus_to_host(dev->bus);
100
101 if (!hose)
102 return;
103
104 if (res->flags & IORESOURCE_IO)
105 offset = (unsigned long)hose->io_base_virt - pci_io_base;
106
107 if (res->flags & IORESOURCE_MEM)
108 offset = hose->pci_mem_offset;
109
110 region->start = res->start - offset;
111 region->end = res->end - offset;
112}
113
114void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
115 struct pci_bus_region *region)
116{
117 unsigned long offset = 0;
118 struct pci_controller *hose = pci_bus_to_host(dev->bus);
119
120 if (!hose)
121 return;
122
123 if (res->flags & IORESOURCE_IO)
124 offset = (unsigned long)hose->io_base_virt - pci_io_base;
125
126 if (res->flags & IORESOURCE_MEM)
127 offset = hose->pci_mem_offset;
128
129 res->start = region->start + offset;
130 res->end = region->end + offset;
131}
132
133#ifdef CONFIG_HOTPLUG
134EXPORT_SYMBOL(pcibios_resource_to_bus);
135EXPORT_SYMBOL(pcibios_bus_to_resource);
136#endif
137
138/*
139 * We need to avoid collisions with `mirrored' VGA ports
140 * and other strange ISA hardware, so we always want the
141 * addresses to be allocated in the 0x000-0x0ff region
142 * modulo 0x400.
143 *
144 * Why? Because some silly external IO cards only decode
145 * the low 10 bits of the IO address. The 0x00-0xff region
146 * is reserved for motherboard devices that decode all 16
147 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
148 * but we want to try to avoid allocating at 0x2900-0x2bff
149 * which might be mirrored at 0x0100-0x03ff.
150 */
151void pcibios_align_resource(void *data, struct resource *res,
152 unsigned long size, unsigned long align)
153{
154 struct pci_dev *dev = data;
155 struct pci_controller *hose = pci_bus_to_host(dev->bus);
156 unsigned long start = res->start;
157 unsigned long alignto;
158
159 if (res->flags & IORESOURCE_IO) {
160 unsigned long offset = (unsigned long)hose->io_base_virt -
161 pci_io_base;
162 /* Make sure we start at our min on all hoses */
163 if (start - offset < PCIBIOS_MIN_IO)
164 start = PCIBIOS_MIN_IO + offset;
165
166 /*
167 * Put everything into 0x00-0xff region modulo 0x400
168 */
169 if (start & 0x300)
170 start = (start + 0x3ff) & ~0x3ff;
171
172 } else if (res->flags & IORESOURCE_MEM) {
173 /* Make sure we start at our min on all hoses */
174 if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
175 start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;
176
177 /* Align to multiple of size of minimum base. */
178 alignto = max(0x1000UL, align);
179 start = ALIGN(start, alignto);
180 }
181
182 res->start = start;
183}
184
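Illustrative aside, not part of this patch: a worked check of the modulo-0x400 I/O rule above, in plain userspace C. Starts that land in the mirrored 0x100-0x3ff window (mod 0x400) get bumped to the next 0x400 boundary; starts already in the 0x000-0x0ff window are left alone.

#include <assert.h>

static unsigned long align_io_start(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}

int main(void)
{
	assert(align_io_start(0x2800) == 0x2800);	/* 0x00-0xff window: kept */
	assert(align_io_start(0x2900) == 0x2c00);	/* mirrored range: bumped */
	return 0;
}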
185static DEFINE_SPINLOCK(hose_spinlock);
186
187/*
188 * Initialize common variables of the pci_controller (phb).
189 */
190void __devinit pci_setup_pci_controller(struct pci_controller *hose)
191{
192 memset(hose, 0, sizeof(struct pci_controller));
193
194 spin_lock(&hose_spinlock);
195 hose->global_number = global_phb_number++;
196 list_add_tail(&hose->list_node, &hose_list);
197 spin_unlock(&hose_spinlock);
198}
199
200static void __init pcibios_claim_one_bus(struct pci_bus *b)
201{
202 struct pci_dev *dev;
203 struct pci_bus *child_bus;
204
205 list_for_each_entry(dev, &b->devices, bus_list) {
206 int i;
207
208 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
209 struct resource *r = &dev->resource[i];
210
211 if (r->parent || !r->start || !r->flags)
212 continue;
213 pci_claim_resource(dev, i);
214 }
215 }
216
217 list_for_each_entry(child_bus, &b->children, node)
218 pcibios_claim_one_bus(child_bus);
219}
220
221#ifndef CONFIG_PPC_ISERIES
222static void __init pcibios_claim_of_setup(void)
223{
224 struct pci_bus *b;
225
226 list_for_each_entry(b, &pci_root_buses, node)
227 pcibios_claim_one_bus(b);
228}
229#endif
230
231#ifdef CONFIG_PPC_MULTIPLATFORM
232static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
233{
234 u32 *prop;
235 int len;
236
237 prop = (u32 *) get_property(np, name, &len);
238 if (prop && len >= 4)
239 return *prop;
240 return def;
241}
242
243static unsigned int pci_parse_of_flags(u32 addr0)
244{
245 unsigned int flags = 0;
246
247 if (addr0 & 0x02000000) {
248 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
249 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
250 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
251 if (addr0 & 0x40000000)
252 flags |= IORESOURCE_PREFETCH
253 | PCI_BASE_ADDRESS_MEM_PREFETCH;
254 } else if (addr0 & 0x01000000)
255 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
256 return flags;
257}
258
259#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
260
261static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
262{
263 u64 base, size;
264 unsigned int flags;
265 struct resource *res;
266 u32 *addrs, i;
267 int proplen;
268
269 addrs = (u32 *) get_property(node, "assigned-addresses", &proplen);
270 if (!addrs)
271 return;
272 for (; proplen >= 20; proplen -= 20, addrs += 5) {
273 flags = pci_parse_of_flags(addrs[0]);
274 if (!flags)
275 continue;
276 base = GET_64BIT(addrs, 1);
277 size = GET_64BIT(addrs, 3);
278 if (!size)
279 continue;
280 i = addrs[0] & 0xff;
281 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
282 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
283 } else if (i == dev->rom_base_reg) {
284 res = &dev->resource[PCI_ROM_RESOURCE];
285 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
286 } else {
287 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
288 continue;
289 }
290 res->start = base;
291 res->end = base + size - 1;
292 res->flags = flags;
293 res->name = pci_name(dev);
294 fixup_resource(res, dev);
295 }
296}
297
298struct pci_dev *of_create_pci_dev(struct device_node *node,
299 struct pci_bus *bus, int devfn)
300{
301 struct pci_dev *dev;
302 const char *type;
303
304 dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
305 if (!dev)
306 return NULL;
307 type = get_property(node, "device_type", NULL);
308 if (type == NULL)
309 type = "";
310
311 memset(dev, 0, sizeof(struct pci_dev));
312 dev->bus = bus;
313 dev->sysdata = node;
314 dev->dev.parent = bus->bridge;
315 dev->dev.bus = &pci_bus_type;
316 dev->devfn = devfn;
317 dev->multifunction = 0; /* maybe a lie? */
318
319 dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
320 dev->device = get_int_prop(node, "device-id", 0xffff);
321 dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
322 dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
323
324 dev->cfg_size = 256; /*pci_cfg_space_size(dev);*/
325
326 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
327 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
328 dev->class = get_int_prop(node, "class-code", 0);
329
330 dev->current_state = 4; /* unknown power state */
331
332 if (!strcmp(type, "pci")) {
333 /* a PCI-PCI bridge */
334 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
335 dev->rom_base_reg = PCI_ROM_ADDRESS1;
336 } else if (!strcmp(type, "cardbus")) {
337 dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
338 } else {
339 dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
340 dev->rom_base_reg = PCI_ROM_ADDRESS;
341 dev->irq = NO_IRQ;
342 if (node->n_intrs > 0) {
343 dev->irq = node->intrs[0].line;
344 pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
345 dev->irq);
346 }
347 }
348
349 pci_parse_of_addrs(node, dev);
350
351 pci_device_add(dev, bus);
352
353 /* XXX pci_scan_msi_device(dev); */
354
355 return dev;
356}
357EXPORT_SYMBOL(of_create_pci_dev);
358
359void __devinit of_scan_bus(struct device_node *node,
360 struct pci_bus *bus)
361{
362 struct device_node *child = NULL;
363 u32 *reg;
364 int reglen, devfn;
365 struct pci_dev *dev;
366
367 while ((child = of_get_next_child(node, child)) != NULL) {
368 reg = (u32 *) get_property(child, "reg", &reglen);
369 if (reg == NULL || reglen < 20)
370 continue;
371 devfn = (reg[0] >> 8) & 0xff;
372 /* create a new pci_dev for this device */
373 dev = of_create_pci_dev(child, bus, devfn);
374 if (!dev)
375 continue;
376 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
377 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
378 of_scan_pci_bridge(child, dev);
379 }
380
381 do_bus_setup(bus);
382}
383EXPORT_SYMBOL(of_scan_bus);
384
385void __devinit of_scan_pci_bridge(struct device_node *node,
386 struct pci_dev *dev)
387{
388 struct pci_bus *bus;
389 u32 *busrange, *ranges;
390 int len, i, mode;
391 struct resource *res;
392 unsigned int flags;
393 u64 size;
394
395 /* parse bus-range property */
396 busrange = (u32 *) get_property(node, "bus-range", &len);
397 if (busrange == NULL || len != 8) {
398 printk(KERN_ERR "Can't get bus-range for PCI-PCI bridge %s\n",
399 node->full_name);
400 return;
401 }
402 ranges = (u32 *) get_property(node, "ranges", &len);
403 if (ranges == NULL) {
404 printk(KERN_ERR "Can't get ranges for PCI-PCI bridge %s\n",
405 node->full_name);
406 return;
407 }
408
409 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
410 if (!bus) {
411 printk(KERN_ERR "Failed to create pci bus for %s\n",
412 node->full_name);
413 return;
414 }
415
416 bus->primary = dev->bus->number;
417 bus->subordinate = busrange[1];
418 bus->bridge_ctl = 0;
419 bus->sysdata = node;
420
421 /* parse ranges property */
422 /* PCI #address-cells == 3 and #size-cells == 2 always */
423 res = &dev->resource[PCI_BRIDGE_RESOURCES];
424 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
425 res->flags = 0;
426 bus->resource[i] = res;
427 ++res;
428 }
429 i = 1;
430 for (; len >= 32; len -= 32, ranges += 8) {
431 flags = pci_parse_of_flags(ranges[0]);
432 size = GET_64BIT(ranges, 6);
433 if (flags == 0 || size == 0)
434 continue;
435 if (flags & IORESOURCE_IO) {
436 res = bus->resource[0];
437 if (res->flags) {
438 printk(KERN_ERR "PCI: ignoring extra I/O range"
439 " for bridge %s\n", node->full_name);
440 continue;
441 }
442 } else {
443 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
444 printk(KERN_ERR "PCI: too many memory ranges"
445 " for bridge %s\n", node->full_name);
446 continue;
447 }
448 res = bus->resource[i];
449 ++i;
450 }
451 res->start = GET_64BIT(ranges, 1);
452 res->end = res->start + size - 1;
453 res->flags = flags;
454 fixup_resource(res, dev);
455 }
456 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
457 bus->number);
458
459 mode = PCI_PROBE_NORMAL;
460 if (ppc_md.pci_probe_mode)
461 mode = ppc_md.pci_probe_mode(bus);
462 if (mode == PCI_PROBE_DEVTREE)
463 of_scan_bus(node, bus);
464 else if (mode == PCI_PROBE_NORMAL)
465 pci_scan_child_bus(bus);
466}
467EXPORT_SYMBOL(of_scan_pci_bridge);
468#endif /* CONFIG_PPC_MULTIPLATFORM */
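
Both parsing loops above lean on pci_parse_of_flags() (defined earlier in this file) to turn the phys.hi cell of an OF PCI address into resource flags. Per the PCI OF binding, bits 24-25 of phys.hi select the space (config, I/O, 32-bit or 64-bit memory) and bit 30 marks a prefetchable range. A hedged sketch of that decoding; the flag values here are illustrative stand-ins, not the kernel's IORESOURCE_* constants:

    #include <stdint.h>
    #include <stdio.h>

    #define RES_IO    0x1   /* illustrative flag values, not IORESOURCE_* */
    #define RES_MEM   0x2
    #define RES_PREF  0x4

    static unsigned int parse_of_flags(uint32_t phys_hi)
    {
            unsigned int flags = 0;

            switch ((phys_hi >> 24) & 0x3) {   /* OF space code */
            case 1:                            /* I/O space */
                    flags = RES_IO;
                    break;
            case 2:                            /* 32-bit memory */
            case 3:                            /* 64-bit memory */
                    flags = RES_MEM;
                    if (phys_hi & 0x40000000)  /* 'p' bit: prefetchable */
                            flags |= RES_PREF;
                    break;
            }                                  /* code 0 = config space */
            return flags;
    }

    int main(void)
    {
            printf("0x42000000 -> %u\n", parse_of_flags(0x42000000)); /* pref mem */
            printf("0x01000000 -> %u\n", parse_of_flags(0x01000000)); /* I/O */
            return 0;
    }
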
469
470void __devinit scan_phb(struct pci_controller *hose)
471{
472 struct pci_bus *bus;
473 struct device_node *node = hose->arch_data;
474 int i, mode;
475 struct resource *res;
476
477 bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
478 if (bus == NULL) {
479 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
480 hose->global_number);
481 return;
482 }
483 bus->secondary = hose->first_busno;
484 hose->bus = bus;
485
486 bus->resource[0] = res = &hose->io_resource;
487 if (res->flags && request_resource(&ioport_resource, res))
488 printk(KERN_ERR "Failed to request PCI IO region "
489 "on PCI domain %04x\n", hose->global_number);
490
491 for (i = 0; i < 3; ++i) {
492 res = &hose->mem_resources[i];
493 bus->resource[i+1] = res;
494 if (res->flags && request_resource(&iomem_resource, res))
495 printk(KERN_ERR "Failed to request PCI memory region "
496 "on PCI domain %04x\n", hose->global_number);
497 }
498
499 mode = PCI_PROBE_NORMAL;
500#ifdef CONFIG_PPC_MULTIPLATFORM
501 if (ppc_md.pci_probe_mode)
502 mode = ppc_md.pci_probe_mode(bus);
503 if (mode == PCI_PROBE_DEVTREE) {
504 bus->subordinate = hose->last_busno;
505 of_scan_bus(node, bus);
506 }
507#endif /* CONFIG_PPC_MULTIPLATFORM */
508 if (mode == PCI_PROBE_NORMAL)
509 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
510 pci_bus_add_devices(bus);
511}
512
513static int __init pcibios_init(void)
514{
515 struct pci_controller *hose, *tmp;
516
517 /* For now, override phys_mem_access_prot. If we need it,
518 * later, we may move that initialization to each ppc_md
519 */
520 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
521
522#ifdef CONFIG_PPC_ISERIES
523 iSeries_pcibios_init();
524#endif
525
526 printk("PCI: Probing PCI hardware\n");
527
528 /* Scan all of the recorded PCI controllers. */
529 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
530 scan_phb(hose);
531
532#ifndef CONFIG_PPC_ISERIES
533 if (pci_probe_only)
534 pcibios_claim_of_setup();
535 else
536 /* FIXME: `else' will be removed when
537 pci_assign_unassigned_resources() is able to work
538 correctly with [partially] allocated PCI tree. */
539 pci_assign_unassigned_resources();
540#endif /* !CONFIG_PPC_ISERIES */
541
542 /* Call machine dependent final fixup */
543 if (ppc_md.pcibios_fixup)
544 ppc_md.pcibios_fixup();
545
546 /* Cache the location of the ISA bridge (if we have one) */
547 ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
548 if (ppc64_isabridge_dev != NULL)
549 printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
550
551#ifdef CONFIG_PPC_MULTIPLATFORM
552 /* map in PCI I/O space */
553 phbs_remap_io();
554#endif
555
556 printk("PCI: Probing PCI hardware done\n");
557
558 return 0;
559}
560
561subsys_initcall(pcibios_init);
562
563char __init *pcibios_setup(char *str)
564{
565 return str;
566}
567
568int pcibios_enable_device(struct pci_dev *dev, int mask)
569{
570 u16 cmd, oldcmd;
571 int i;
572
573 pci_read_config_word(dev, PCI_COMMAND, &cmd);
574 oldcmd = cmd;
575
576 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
577 struct resource *res = &dev->resource[i];
578
579 /* Only set up the requested stuff */
580 if (!(mask & (1<<i)))
581 continue;
582
583 if (res->flags & IORESOURCE_IO)
584 cmd |= PCI_COMMAND_IO;
585 if (res->flags & IORESOURCE_MEM)
586 cmd |= PCI_COMMAND_MEMORY;
587 }
588
589 if (cmd != oldcmd) {
590 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
591 pci_name(dev), cmd);
592 /* Enable the appropriate bits in the PCI command register. */
593 pci_write_config_word(dev, PCI_COMMAND, cmd);
594 }
595 return 0;
596}
597
598/*
599 * Return the domain number for this bus.
600 */
601int pci_domain_nr(struct pci_bus *bus)
602{
603#ifdef CONFIG_PPC_ISERIES
604 return 0;
605#else
606 struct pci_controller *hose = pci_bus_to_host(bus);
607
608 return hose->global_number;
609#endif
610}
611
612EXPORT_SYMBOL(pci_domain_nr);
613
614/* Decide whether to display the domain number in /proc */
615int pci_proc_domain(struct pci_bus *bus)
616{
617#ifdef CONFIG_PPC_ISERIES
618 return 0;
619#else
620 struct pci_controller *hose = pci_bus_to_host(bus);
621 return hose->buid;
622#endif
623}
624
625/*
626 * Platform support for /proc/bus/pci/X/Y mmap()s,
627 * modelled on the sparc64 implementation by Dave Miller.
628 * -- paulus.
629 */
630
631/*
632 * Adjust vm_pgoff of VMA such that it is the physical page offset
633 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
634 *
635 * Basically, the user finds the base address of the device they wish
636 * to mmap. They read the 32-bit value from the config space base register,
637 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
638 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
639 *
640 * Returns negative error code on failure, zero on success.
641 */
642static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
643 unsigned long *offset,
644 enum pci_mmap_state mmap_state)
645{
646 struct pci_controller *hose = pci_bus_to_host(dev->bus);
647 unsigned long io_offset = 0;
648 int i, res_bit;
649
650 if (hose == NULL)
651 return NULL; /* should never happen */
652
653 /* If memory, add on the PCI bridge address offset */
654 if (mmap_state == pci_mmap_mem) {
655 *offset += hose->pci_mem_offset;
656 res_bit = IORESOURCE_MEM;
657 } else {
658 io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
659 *offset += io_offset;
660 res_bit = IORESOURCE_IO;
661 }
662
663 /*
664 * Check that the offset requested corresponds to one of the
665 * resources of the device.
666 */
667 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
668 struct resource *rp = &dev->resource[i];
669 int flags = rp->flags;
670
671 /* treat ROM as memory (should be already) */
672 if (i == PCI_ROM_RESOURCE)
673 flags |= IORESOURCE_MEM;
674
675 /* Active and same type? */
676 if ((flags & res_bit) == 0)
677 continue;
678
679 /* In the range of this resource? */
680 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
681 continue;
682
683 /* found it! construct the final physical address */
684 if (mmap_state == pci_mmap_io)
685 *offset += hose->io_base_phys - io_offset;
686 return rp;
687 }
688
689 return NULL;
690}
691
692/*
693 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
694 * device mapping.
695 */
696static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
697 pgprot_t protection,
698 enum pci_mmap_state mmap_state,
699 int write_combine)
700{
701 unsigned long prot = pgprot_val(protection);
702
703 /* Write combine is always 0 on non-memory space mappings. On
704 * memory space, if the user didn't pass 1, we check for a
705 * "prefetchable" resource. This is a bit hackish, but we use
706 * this to work around the inability of /sysfs to provide a write
707 * combine bit.
708 */
709 if (mmap_state != pci_mmap_mem)
710 write_combine = 0;
711 else if (write_combine == 0) {
712 if (rp->flags & IORESOURCE_PREFETCH)
713 write_combine = 1;
714 }
715
716 /* XXX would be nice to have a way to ask for write-through */
717 prot |= _PAGE_NO_CACHE;
718 if (write_combine)
719 prot &= ~_PAGE_GUARDED;
720 else
721 prot |= _PAGE_GUARDED;
722
723 printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
724 prot);
725
726 return __pgprot(prot);
727}
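
The decision above reduces to a small table: I/O mappings are always uncached and guarded; memory mappings drop the guard bit (enabling write combining) either on explicit request or when the BAR is prefetchable. A compact sketch of that policy, with illustrative bit values standing in for _PAGE_NO_CACHE/_PAGE_GUARDED:

    #include <stdio.h>

    #define PG_NO_CACHE 0x1   /* stand-ins for _PAGE_NO_CACHE / _PAGE_GUARDED */
    #define PG_GUARDED  0x2

    static unsigned long mmap_prot(int is_mem, int prefetchable, int write_combine)
    {
            unsigned long prot = PG_NO_CACHE;

            if (!is_mem)
                    write_combine = 0;              /* never on I/O space */
            else if (!write_combine && prefetchable)
                    write_combine = 1;              /* infer from the BAR */

            return write_combine ? prot : prot | PG_GUARDED;
    }

    int main(void)
    {
            printf("I/O:           %lx\n", mmap_prot(0, 0, 1));
            printf("plain mem:     %lx\n", mmap_prot(1, 0, 0));
            printf("prefetch mem:  %lx\n", mmap_prot(1, 1, 0));
            return 0;
    }
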
728
729/*
730 * This one is used by /dev/mem and fbdev who have no clue about the
731 * PCI device, it tries to find the PCI device first and calls the
732 * above routine
733 */
734pgprot_t pci_phys_mem_access_prot(struct file *file,
735 unsigned long pfn,
736 unsigned long size,
737 pgprot_t protection)
738{
739 struct pci_dev *pdev = NULL;
740 struct resource *found = NULL;
741 unsigned long prot = pgprot_val(protection);
742 unsigned long offset = pfn << PAGE_SHIFT;
743 int i;
744
745 if (page_is_ram(pfn))
746 return __pgprot(prot);
747
748 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
749
750 for_each_pci_dev(pdev) {
751 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
752 struct resource *rp = &pdev->resource[i];
753 int flags = rp->flags;
754
755 /* Active and same type? */
756 if ((flags & IORESOURCE_MEM) == 0)
757 continue;
758 /* In the range of this resource? */
759 if (offset < (rp->start & PAGE_MASK) ||
760 offset > rp->end)
761 continue;
762 found = rp;
763 break;
764 }
765 if (found)
766 break;
767 }
768 if (found) {
769 if (found->flags & IORESOURCE_PREFETCH)
770 prot &= ~_PAGE_GUARDED;
771 pci_dev_put(pdev);
772 }
773
774 DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
775
776 return __pgprot(prot);
777}
778
779
780/*
781 * Perform the actual remap of the pages for a PCI device mapping, as
782 * appropriate for this architecture. The region in the process to map
783 * is described by vm_start and vm_end members of VMA, the base physical
784 * address is found in vm_pgoff.
785 * The pci device structure is provided so that architectures may make mapping
786 * decisions on a per-device or per-bus basis.
787 *
788 * Returns a negative error code on failure, zero on success.
789 */
790int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
791 enum pci_mmap_state mmap_state,
792 int write_combine)
793{
794 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
795 struct resource *rp;
796 int ret;
797
798 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
799 if (rp == NULL)
800 return -EINVAL;
801
802 vma->vm_pgoff = offset >> PAGE_SHIFT;
803 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
804 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
805 vma->vm_page_prot,
806 mmap_state, write_combine);
807
808 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
809 vma->vm_end - vma->vm_start, vma->vm_page_prot);
810
811 return ret;
812}
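
From user space the flow described in the comments above looks roughly like this: read a BAR from the device's config space, page-align it, and pass it as the mmap() offset on the /proc/bus/pci file. A sketch under assumptions: the device path is hypothetical, error handling is minimal, and real clients typically also select I/O vs memory space with the PCIIOC_* ioctls, which are omitted here.

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* hypothetical device: bus 01, slot 02, function 0 */
            int fd = open("/proc/bus/pci/01/02.0", O_RDWR);
            uint32_t bar0 = 0;

            if (fd < 0)
                    return 1;
            /* reads on this file return config space; BAR0 is at 0x10 */
            if (pread(fd, &bar0, sizeof(bar0), 0x10) != sizeof(bar0))
                    return 1;
            bar0 &= ~0xfu;                  /* strip the BAR flag bits */

            /* the kernel validates this offset against the device's
             * resources in __pci_mmap_make_offset() */
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                           fd, bar0 & ~4095u);
            if (p == MAP_FAILED)
                    return 1;
            munmap(p, 4096);
            close(fd);
            return 0;
    }
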
813
814#ifdef CONFIG_PPC_MULTIPLATFORM
815static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
816{
817 struct pci_dev *pdev;
818 struct device_node *np;
819
820 pdev = to_pci_dev (dev);
821 np = pci_device_to_OF_node(pdev);
822 if (np == NULL || np->full_name == NULL)
823 return 0;
824 return sprintf(buf, "%s", np->full_name);
825}
826static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
827#endif /* CONFIG_PPC_MULTIPLATFORM */
828
829void pcibios_add_platform_entries(struct pci_dev *pdev)
830{
831#ifdef CONFIG_PPC_MULTIPLATFORM
832 device_create_file(&pdev->dev, &dev_attr_devspec);
833#endif /* CONFIG_PPC_MULTIPLATFORM */
834}
835
836#ifdef CONFIG_PPC_MULTIPLATFORM
837
838#define ISA_SPACE_MASK 0x1
839#define ISA_SPACE_IO 0x1
840
841static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
842 unsigned long phb_io_base_phys,
843 void __iomem * phb_io_base_virt)
844{
845 struct isa_range *range;
846 unsigned long pci_addr;
847 unsigned int isa_addr;
848 unsigned int size;
849 int rlen = 0;
850
851 range = (struct isa_range *) get_property(isa_node, "ranges", &rlen);
852 if (range == NULL || (rlen < sizeof(struct isa_range))) {
853 printk(KERN_ERR "no ISA ranges or unexpected isa range size,"
854 "mapping 64k\n");
855 __ioremap_explicit(phb_io_base_phys,
856 (unsigned long)phb_io_base_virt,
857 0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
858 return;
859 }
860
861 /* From "ISA Binding to 1275"
862 * The ranges property is laid out as an array of elements,
863 * each of which comprises:
864 * cells 0 - 1: an ISA address
865 * cells 2 - 4: a PCI address
866 * (size depending on dev->n_addr_cells)
867 * cell 5: the size of the range
868 */
869 if ((range->isa_addr.a_hi & ISA_SPACE_MASK) == ISA_SPACE_IO) {
870 isa_addr = range->isa_addr.a_lo;
871 pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
872 range->pci_addr.a_lo;
873
874 /* Assume these are both zero */
875 if ((pci_addr != 0) || (isa_addr != 0)) {
876 printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
877 __FUNCTION__);
878 return;
879 }
880
881 size = PAGE_ALIGN(range->size);
882
883 __ioremap_explicit(phb_io_base_phys,
884 (unsigned long) phb_io_base_virt,
885 size, _PAGE_NO_CACHE | _PAGE_GUARDED);
886 }
887}
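
A user-space sketch of the same decoding, mirroring the bitwise space test in pci_process_ISA_OF_ranges() above and using a flattened stand-in for the kernel's struct isa_range (field names assumed):

    #include <stdint.h>
    #include <stdio.h>

    /* layout per the "ISA Binding to 1275" comment above */
    struct isa_range {
            uint32_t isa_hi, isa_lo;            /* ISA address: space + offset */
            uint32_t pci_hi, pci_mid, pci_lo;   /* PCI address */
            uint32_t size;
    };

    #define ISA_SPACE_MASK 0x1
    #define ISA_SPACE_IO   0x1

    int main(void)
    {
            struct isa_range r = { 0x1, 0x0, 0x0, 0x0, 0x0, 0x10000 };

            if ((r.isa_hi & ISA_SPACE_MASK) == ISA_SPACE_IO) {  /* bitwise test */
                    uint64_t pci = ((uint64_t)r.pci_mid << 32) | r.pci_lo;
                    printf("ISA I/O at 0x%x -> PCI 0x%llx, size 0x%x\n",
                           (unsigned)r.isa_lo, (unsigned long long)pci,
                           (unsigned)r.size);
            }
            return 0;
    }
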
888
889void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
890 struct device_node *dev, int prim)
891{
892 unsigned int *ranges, pci_space;
893 unsigned long size;
894 int rlen = 0;
895 int memno = 0;
896 struct resource *res;
897 int np, na = prom_n_addr_cells(dev);
898 unsigned long pci_addr, cpu_phys_addr;
899
900 np = na + 5;
901
902 /* From "PCI Binding to 1275"
903 * The ranges property is laid out as an array of elements,
904 * each of which comprises:
905 * cells 0 - 2: a PCI address
906 * cells 3 or 3+4: a CPU physical address
907 * (size depending on dev->n_addr_cells)
908 * cells 4+5 or 5+6: the size of the range
909 */
910 rlen = 0;
911 hose->io_base_phys = 0;
912 ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
913 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
914 res = NULL;
915 pci_space = ranges[0];
916 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
917
918 cpu_phys_addr = ranges[3];
919 if (na >= 2)
920 cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
921
922 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
923 ranges += np;
924 if (size == 0)
925 continue;
926
927 /* Now consume following elements while they are contiguous */
928 while (rlen >= np * sizeof(unsigned int)) {
929 unsigned long addr, phys;
930
931 if (ranges[0] != pci_space)
932 break;
933 addr = ((unsigned long)ranges[1] << 32) | ranges[2];
934 phys = ranges[3];
935 if (na >= 2)
936 phys = (phys << 32) | ranges[4];
937 if (addr != pci_addr + size ||
938 phys != cpu_phys_addr + size)
939 break;
940
941 size += ((unsigned long)ranges[na+3] << 32)
942 | ranges[na+4];
943 ranges += np;
944 rlen -= np * sizeof(unsigned int);
945 }
946
947 switch ((pci_space >> 24) & 0x3) {
948 case 1: /* I/O space */
949 hose->io_base_phys = cpu_phys_addr;
950 hose->pci_io_size = size;
951
952 res = &hose->io_resource;
953 res->flags = IORESOURCE_IO;
954 res->start = pci_addr;
955 DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
956 res->start, res->start + size - 1);
957 break;
958 case 2: /* memory space */
959 memno = 0;
960 while (memno < 3 && hose->mem_resources[memno].flags)
961 ++memno;
962
963 if (memno == 0)
964 hose->pci_mem_offset = cpu_phys_addr - pci_addr;
965 if (memno < 3) {
966 res = &hose->mem_resources[memno];
967 res->flags = IORESOURCE_MEM;
968 res->start = cpu_phys_addr;
969 DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
970 res->start, res->start + size - 1);
971 }
972 break;
973 }
974 if (res != NULL) {
975 res->name = dev->full_name;
976 res->end = res->start + size - 1;
977 res->parent = NULL;
978 res->sibling = NULL;
979 res->child = NULL;
980 }
981 }
982}
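
The inner while loop above merges consecutive "ranges" entries whose PCI and CPU sides are both contiguous into a single resource. The same coalescing on a plain array, as a sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t pci, cpu, size; };

    /* merge entries that continue both the PCI and CPU sides; returns count */
    static int coalesce(struct range *r, int n)
    {
            int out = 0;

            for (int i = 0; i < n; i++) {
                    if (out && r[i].pci == r[out - 1].pci + r[out - 1].size &&
                        r[i].cpu == r[out - 1].cpu + r[out - 1].size)
                            r[out - 1].size += r[i].size;  /* contiguous: extend */
                    else
                            r[out++] = r[i];
            }
            return out;
    }

    int main(void)
    {
            struct range r[] = {
                    { 0x80000000, 0xc0000000, 0x100000 },
                    { 0x80100000, 0xc0100000, 0x100000 },   /* contiguous */
                    { 0x90000000, 0xd0000000, 0x100000 },   /* not */
            };
            int n = coalesce(r, 3);
            for (int i = 0; i < n; i++)
                    printf("pci 0x%llx cpu 0x%llx size 0x%llx\n",
                           (unsigned long long)r[i].pci,
                           (unsigned long long)r[i].cpu,
                           (unsigned long long)r[i].size);
            return 0;
    }
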
983
984void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
985{
986 unsigned long size = hose->pci_io_size;
987 unsigned long io_virt_offset;
988 struct resource *res;
989 struct device_node *isa_dn;
990
991 hose->io_base_virt = reserve_phb_iospace(size);
992 DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
993 hose->global_number, hose->io_base_phys,
994 (unsigned long) hose->io_base_virt);
995
996 if (primary) {
997 pci_io_base = (unsigned long)hose->io_base_virt;
998 isa_dn = of_find_node_by_type(NULL, "isa");
999 if (isa_dn) {
1000 isa_io_base = pci_io_base;
1001 pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
1002 hose->io_base_virt);
1003 of_node_put(isa_dn);
1004 /* Allow all IO */
1005 io_page_mask = -1;
1006 }
1007 }
1008
1009 io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
1010 res = &hose->io_resource;
1011 res->start += io_virt_offset;
1012 res->end += io_virt_offset;
1013}
1014
1015void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
1016 int primary)
1017{
1018 unsigned long size = hose->pci_io_size;
1019 unsigned long io_virt_offset;
1020 struct resource *res;
1021
1022 hose->io_base_virt = __ioremap(hose->io_base_phys, size,
1023 _PAGE_NO_CACHE | _PAGE_GUARDED);
1024 DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
1025 hose->global_number, hose->io_base_phys,
1026 (unsigned long) hose->io_base_virt);
1027
1028 if (primary)
1029 pci_io_base = (unsigned long)hose->io_base_virt;
1030
1031 io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
1032 res = &hose->io_resource;
1033 res->start += io_virt_offset;
1034 res->end += io_virt_offset;
1035}
1036
1037
1038static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys,
1039 unsigned long *start_virt, unsigned long *size)
1040{
1041 struct pci_controller *hose = pci_bus_to_host(bus);
1042 struct pci_bus_region region;
1043 struct resource *res;
1044
1045 if (bus->self) {
1046 res = bus->resource[0];
1047 pcibios_resource_to_bus(bus->self, &region, res);
1048 *start_phys = hose->io_base_phys + region.start;
1049 *start_virt = (unsigned long) hose->io_base_virt +
1050 region.start;
1051 if (region.end > region.start)
1052 *size = region.end - region.start + 1;
1053 else {
1054 printk("%s(): unexpected region 0x%lx->0x%lx\n",
1055 __FUNCTION__, region.start, region.end);
1056 return 1;
1057 }
1058
1059 } else {
1060 /* Root Bus */
1061 res = &hose->io_resource;
1062 *start_phys = hose->io_base_phys;
1063 *start_virt = (unsigned long) hose->io_base_virt;
1064 if (res->end > res->start)
1065 *size = res->end - res->start + 1;
1066 else {
1067 printk("%s(): unexpected region 0x%lx->0x%lx\n",
1068 __FUNCTION__, res->start, res->end);
1069 return 1;
1070 }
1071 }
1072
1073 return 0;
1074}
1075
1076int unmap_bus_range(struct pci_bus *bus)
1077{
1078 unsigned long start_phys;
1079 unsigned long start_virt;
1080 unsigned long size;
1081
1082 if (!bus) {
1083 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
1084 return 1;
1085 }
1086
1087 if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
1088 return 1;
1089 if (iounmap_explicit((void __iomem *) start_virt, size))
1090 return 1;
1091
1092 return 0;
1093}
1094EXPORT_SYMBOL(unmap_bus_range);
1095
1096int remap_bus_range(struct pci_bus *bus)
1097{
1098 unsigned long start_phys;
1099 unsigned long start_virt;
1100 unsigned long size;
1101
1102 if (!bus) {
1103 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
1104 return 1;
1105 }
1106
1107
1108 if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
1109 return 1;
1110 printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size);
1111 if (__ioremap_explicit(start_phys, start_virt, size,
1112 _PAGE_NO_CACHE | _PAGE_GUARDED))
1113 return 1;
1114
1115 return 0;
1116}
1117EXPORT_SYMBOL(remap_bus_range);
1118
1119void phbs_remap_io(void)
1120{
1121 struct pci_controller *hose, *tmp;
1122
1123 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1124 remap_bus_range(hose->bus);
1125}
1126
1127/*
1128 * ppc64 can have multifunction devices that do not respond to function 0.
1129 * In this case we must scan all functions.
1130 * XXX this can go now, we use the OF device tree in all the
1131 * cases that caused problems. -- paulus
1132 */
1133int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
1134{
1135 return 0;
1136}
1137
1138static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
1139{
1140 struct pci_controller *hose = pci_bus_to_host(dev->bus);
1141 unsigned long start, end, mask, offset;
1142
1143 if (res->flags & IORESOURCE_IO) {
1144 offset = (unsigned long)hose->io_base_virt - pci_io_base;
1145
1146 start = res->start += offset;
1147 end = res->end += offset;
1148
1149 /* Need to allow IO access to pages that are in the
1150 ISA range */
1151 if (start < MAX_ISA_PORT) {
1152 if (end > MAX_ISA_PORT)
1153 end = MAX_ISA_PORT;
1154
1155 start >>= PAGE_SHIFT;
1156 end >>= PAGE_SHIFT;
1157
1158 /* get the range of pages for the map */
1159 mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
1160 io_page_mask |= mask;
1161 }
1162 } else if (res->flags & IORESOURCE_MEM) {
1163 res->start += hose->pci_mem_offset;
1164 res->end += hose->pci_mem_offset;
1165 }
1166}
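
The mask arithmetic above is the standard "set bits start..end inclusive" identity, one bit per ISA I/O page recorded in io_page_mask. The expression in isolation, using 64-bit shifts (well-defined for end < 63):

    #include <stdio.h>

    /* bits start..end inclusive, same identity as in fixup_resource() */
    static unsigned long long page_bits(unsigned start, unsigned end)
    {
            return ((1ULL << (end + 1)) - 1) ^ ((1ULL << start) - 1);
    }

    int main(void)
    {
            printf("pages 2..4 -> 0x%llx\n", page_bits(2, 4));  /* 0x1c */
            return 0;
    }
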
1167
1168void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
1169 struct pci_bus *bus)
1170{
1171 /* Update device resources. */
1172 int i;
1173
1174 for (i = 0; i < PCI_NUM_RESOURCES; i++)
1175 if (dev->resource[i].flags)
1176 fixup_resource(&dev->resource[i], dev);
1177}
1178EXPORT_SYMBOL(pcibios_fixup_device_resources);
1179
1180static void __devinit do_bus_setup(struct pci_bus *bus)
1181{
1182 struct pci_dev *dev;
1183
1184 ppc_md.iommu_bus_setup(bus);
1185
1186 list_for_each_entry(dev, &bus->devices, bus_list)
1187 ppc_md.iommu_dev_setup(dev);
1188
1189 if (ppc_md.irq_bus_setup)
1190 ppc_md.irq_bus_setup(bus);
1191}
1192
1193void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1194{
1195 struct pci_dev *dev = bus->self;
1196
1197 if (dev && pci_probe_only &&
1198 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1199 /* This is a subordinate bridge */
1200
1201 pci_read_bridge_bases(bus);
1202 pcibios_fixup_device_resources(dev, bus);
1203 }
1204
1205 do_bus_setup(bus);
1206
1207 if (!pci_probe_only)
1208 return;
1209
1210 list_for_each_entry(dev, &bus->devices, bus_list)
1211 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1212 pcibios_fixup_device_resources(dev, bus);
1213}
1214EXPORT_SYMBOL(pcibios_fixup_bus);
1215
1216/*
1217 * Reads the interrupt pin to determine if the interrupt is used by the card.
1218 * If it is, gets the interrupt line from Open Firmware and sets it in
1219 * both the pci_dev and the PCI_INTERRUPT_LINE config register.
1220 */
1221int pci_read_irq_line(struct pci_dev *pci_dev)
1222{
1223 u8 intpin;
1224 struct device_node *node;
1225
1226 pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin);
1227 if (intpin == 0)
1228 return 0;
1229
1230 node = pci_device_to_OF_node(pci_dev);
1231 if (node == NULL)
1232 return -1;
1233
1234 if (node->n_intrs == 0)
1235 return -1;
1236
1237 pci_dev->irq = node->intrs[0].line;
1238
1239 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
1240
1241 return 0;
1242}
1243EXPORT_SYMBOL(pci_read_irq_line);
1244
1245void pci_resource_to_user(const struct pci_dev *dev, int bar,
1246 const struct resource *rsrc,
1247 u64 *start, u64 *end)
1248{
1249 struct pci_controller *hose = pci_bus_to_host(dev->bus);
1250 unsigned long offset = 0;
1251
1252 if (hose == NULL)
1253 return;
1254
1255 if (rsrc->flags & IORESOURCE_IO)
1256 offset = pci_io_base - (unsigned long)hose->io_base_virt +
1257 hose->io_base_phys;
1258
1259 *start = rsrc->start + offset;
1260 *end = rsrc->end + offset;
1261}
1262
1263#endif /* CONFIG_PPC_MULTIPLATFORM */
1264
1265
1266#define IOBASE_BRIDGE_NUMBER 0
1267#define IOBASE_MEMORY 1
1268#define IOBASE_IO 2
1269#define IOBASE_ISA_IO 3
1270#define IOBASE_ISA_MEM 4
1271
1272long sys_pciconfig_iobase(long which, unsigned long in_bus,
1273 unsigned long in_devfn)
1274{
1275 struct pci_controller* hose;
1276 struct list_head *ln;
1277 struct pci_bus *bus = NULL;
1278 struct device_node *hose_node;
1279
1280 /* Argh ! Please forgive me for that hack, but that's the
1281 * simplest way to get existing XFree to not lockup on some
1282 * G5 machines... So when something asks for bus 0 io base
1283 * (bus 0 is HT root), we return the AGP one instead.
1284 */
1285 if (machine_is_compatible("MacRISC4"))
1286 if (in_bus == 0)
1287 in_bus = 0xf0;
1288
1289 /* This syscall isn't quite compatible with PCI domains, but it's
1290 * used by pre-domain setups. We return the first match.
1291 */
1292
1293 for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
1294 bus = pci_bus_b(ln);
1295 if (in_bus >= bus->number && in_bus < (bus->number + bus->subordinate))
1296 break;
1297 bus = NULL;
1298 }
1299 if (bus == NULL || bus->sysdata == NULL)
1300 return -ENODEV;
1301
1302 hose_node = (struct device_node *)bus->sysdata;
1303 hose = PCI_DN(hose_node)->phb;
1304
1305 switch (which) {
1306 case IOBASE_BRIDGE_NUMBER:
1307 return (long)hose->first_busno;
1308 case IOBASE_MEMORY:
1309 return (long)hose->pci_mem_offset;
1310 case IOBASE_IO:
1311 return (long)hose->io_base_phys;
1312 case IOBASE_ISA_IO:
1313 return (long)isa_io_base;
1314 case IOBASE_ISA_MEM:
1315 return -EINVAL;
1316 }
1317
1318 return -EOPNOTSUPP;
1319}
diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
deleted file mode 100644
index e1a32f802c0b..000000000000
--- a/arch/ppc64/kernel/pci_direct_iommu.c
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * Support for DMA from PCI devices to main memory on
3 * machines without an iommu or with directly addressable
4 * RAM (typically a pmac with 2GB of RAM or less)
5 *
6 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/delay.h>
17#include <linux/string.h>
18#include <linux/init.h>
19#include <linux/bootmem.h>
20#include <linux/mm.h>
21#include <linux/dma-mapping.h>
22
23#include <asm/sections.h>
24#include <asm/io.h>
25#include <asm/prom.h>
26#include <asm/pci-bridge.h>
27#include <asm/machdep.h>
28#include <asm/pmac_feature.h>
29#include <asm/abs_addr.h>
30#include <asm/ppc-pci.h>
31
32static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
33 dma_addr_t *dma_handle, gfp_t flag)
34{
35 void *ret;
36
37 ret = (void *)__get_free_pages(flag, get_order(size));
38 if (ret != NULL) {
39 memset(ret, 0, size);
40 *dma_handle = virt_to_abs(ret);
41 }
42 return ret;
43}
44
45static void pci_direct_free_coherent(struct device *hwdev, size_t size,
46 void *vaddr, dma_addr_t dma_handle)
47{
48 free_pages((unsigned long)vaddr, get_order(size));
49}
50
51static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
52 size_t size, enum dma_data_direction direction)
53{
54 return virt_to_abs(ptr);
55}
56
57static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
58 size_t size, enum dma_data_direction direction)
59{
60}
61
62static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
63 int nents, enum dma_data_direction direction)
64{
65 int i;
66
67 for (i = 0; i < nents; i++, sg++) {
68 sg->dma_address = page_to_phys(sg->page) + sg->offset;
69 sg->dma_length = sg->length;
70 }
71
72 return nents;
73}
74
75static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
76 int nents, enum dma_data_direction direction)
77{
78}
79
80static int pci_direct_dma_supported(struct device *dev, u64 mask)
81{
82 return mask < 0x100000000ull;
83}
84
85void __init pci_direct_iommu_init(void)
86{
87 pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
88 pci_dma_ops.free_coherent = pci_direct_free_coherent;
89 pci_dma_ops.map_single = pci_direct_map_single;
90 pci_dma_ops.unmap_single = pci_direct_unmap_single;
91 pci_dma_ops.map_sg = pci_direct_map_sg;
92 pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
93 pci_dma_ops.dma_supported = pci_direct_dma_supported;
94}
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
deleted file mode 100644
index 12c4c9e9bbc7..000000000000
--- a/arch/ppc64/kernel/pci_dn.c
+++ /dev/null
@@ -1,230 +0,0 @@
1/*
2 * pci_dn.c
3 *
4 * Copyright (C) 2001 Todd Inglett, IBM Corporation
5 *
6 * PCI manipulation via device_nodes.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <linux/kernel.h>
23#include <linux/pci.h>
24#include <linux/string.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/bootmem.h>
28
29#include <asm/io.h>
30#include <asm/prom.h>
31#include <asm/pci-bridge.h>
32#include <asm/pSeries_reconfig.h>
33#include <asm/ppc-pci.h>
34
35/*
36 * Traverse_func that inits the PCI fields of the device node.
37 * NOTE: this *must* be done before read/write config to the device.
38 */
39static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
40{
41 struct pci_controller *phb = data;
42 int *type = (int *)get_property(dn, "ibm,pci-config-space-type", NULL);
43 u32 *regs;
44 struct pci_dn *pdn;
45
46 if (mem_init_done)
47 pdn = kmalloc(sizeof(*pdn), GFP_KERNEL);
48 else
49 pdn = alloc_bootmem(sizeof(*pdn));
50 if (pdn == NULL)
51 return NULL;
52 memset(pdn, 0, sizeof(*pdn));
53 dn->data = pdn;
54 pdn->node = dn;
55 pdn->phb = phb;
56 regs = (u32 *)get_property(dn, "reg", NULL);
57 if (regs) {
58 /* First register entry is addr (00BBSS00) */
59 pdn->busno = (regs[0] >> 16) & 0xff;
60 pdn->devfn = (regs[0] >> 8) & 0xff;
61 }
62
63 pdn->pci_ext_config_space = (type && *type == 1);
64 return NULL;
65}
66
67/*
68 * Traverse a device tree stopping each PCI device in the tree.
69 * This is done depth first. As each node is processed, a "pre"
70 * function is called and the children are processed recursively.
71 *
72 * The "pre" func returns a value. If non-zero is returned from
73 * the "pre" func, the traversal stops and this value is returned.
74 * This return value is useful when using traverse as a method of
75 * finding a device.
76 *
77 * NOTE: we do not run the func for devices that do not appear to
78 * be PCI, except for the start node, which we assume to be PCI
79 * (useful because the start node is often a phb that may be missing
80 * PCI properties).
81 * We use the class-code as an indicator. If we run into
82 * one of these nodes we also assume its siblings are non-pci for
83 * performance.
84 */
85void *traverse_pci_devices(struct device_node *start, traverse_func pre,
86 void *data)
87{
88 struct device_node *dn, *nextdn;
89 void *ret;
90
91 /* We started with a phb, iterate over all its children */
92 for (dn = start->child; dn; dn = nextdn) {
93 u32 *classp, class;
94
95 nextdn = NULL;
96 classp = (u32 *)get_property(dn, "class-code", NULL);
97 class = classp ? *classp : 0;
98
99 if (pre && ((ret = pre(dn, data)) != NULL))
100 return ret;
101
102 /* If we are a PCI bridge, go down */
103 if (dn->child && ((class >> 8) == PCI_CLASS_BRIDGE_PCI ||
104 (class >> 8) == PCI_CLASS_BRIDGE_CARDBUS))
105 /* Depth first...do children */
106 nextdn = dn->child;
107 else if (dn->sibling)
108 /* ok, try next sibling instead. */
109 nextdn = dn->sibling;
110 if (!nextdn) {
111 /* Walk up to next valid sibling. */
112 do {
113 dn = dn->parent;
114 if (dn == start)
115 return NULL;
116 } while (dn->sibling == NULL);
117 nextdn = dn->sibling;
118 }
119 }
120 return NULL;
121}
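
The walk above is a pre-order traversal over child/sibling/parent links with an early-out whenever pre() returns non-NULL. A simplified user-space sketch (it descends into every child, whereas the kernel version only recurses under PCI and CardBus bridges):

    #include <stddef.h>
    #include <stdio.h>

    struct node {
            const char *name;
            struct node *parent, *child, *sibling;
    };

    typedef void *(*pre_func)(struct node *, void *);

    /* pre-order walk, mirroring traverse_pci_devices(): stop and return
     * whatever pre() returns non-NULL */
    static void *traverse(struct node *start, pre_func pre, void *data)
    {
            for (struct node *n = start->child; n; ) {
                    void *ret = pre(n, data);
                    if (ret)
                            return ret;
                    if (n->child)
                            n = n->child;               /* depth first */
                    else {
                            while (!n->sibling) {       /* climb back up */
                                    n = n->parent;
                                    if (n == start)
                                            return NULL;
                            }
                            n = n->sibling;
                    }
            }
            return NULL;
    }

    static void *print_name(struct node *n, void *data)
    {
            (void)data;
            printf("%s\n", n->name);
            return NULL;    /* NULL = keep walking */
    }

    int main(void)
    {
            struct node root = { "phb" }, a = { "dev-a" }, b = { "dev-b" };
            root.child = &a; a.parent = &root; a.sibling = &b; b.parent = &root;
            traverse(&root, print_name, NULL);
            return 0;
    }
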
122
123/**
124 * pci_devs_phb_init_dynamic - setup pci devices under this PHB
125 * phb: pci-to-host bridge (top-level bridge connecting to cpu)
126 *
127 * This routine is called both during boot, (before the memory
128 * subsystem is set up, before kmalloc is valid) and during the
129 * dynamic lpar operation of adding a PHB to a running system.
130 */
131void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
132{
133 struct device_node * dn = (struct device_node *) phb->arch_data;
134 struct pci_dn *pdn;
135
136 /* PHB nodes themselves must not match */
137 update_dn_pci_info(dn, phb);
138 pdn = dn->data;
139 if (pdn) {
140 pdn->devfn = pdn->busno = -1;
141 pdn->phb = phb;
142 }
143
144 /* Update dn->phb ptrs for new phb and children devices */
145 traverse_pci_devices(dn, update_dn_pci_info, phb);
146}
147
148/*
149 * Traversal func that looks for a <busno,devfcn> value.
150 * If found, the pci_dn is returned (thus terminating the traversal).
151 */
152static void *is_devfn_node(struct device_node *dn, void *data)
153{
154 int busno = ((unsigned long)data >> 8) & 0xff;
155 int devfn = ((unsigned long)data) & 0xff;
156 struct pci_dn *pci = dn->data;
157
158 if (pci && (devfn == pci->devfn) && (busno == pci->busno))
159 return dn;
160 return NULL;
161}
162
163/*
164 * This is the "slow" path for looking up a device_node from a
165 * pci_dev. It will hunt for the device under its parent's
166 * phb and then update sysdata for a future fastpath.
167 *
168 * It may also do fixups on the actual device since this happens
169 * on the first read/write.
170 *
171 * Note that it also must deal with devices that don't exist.
172 * In this case it may probe for real hardware ("just in case")
173 * and add a device_node to the device tree if necessary.
174 *
175 */
176struct device_node *fetch_dev_dn(struct pci_dev *dev)
177{
178 struct device_node *orig_dn = dev->sysdata;
179 struct device_node *dn;
180 unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
181
182 dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval);
183 if (dn)
184 dev->sysdata = dn;
185 return dn;
186}
187EXPORT_SYMBOL(fetch_dev_dn);
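
fetch_dev_dn() packs its search key as (bus->number << 8) | devfn, and is_devfn_node() unpacks it the same way. The round trip, as a one-screen sketch:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long key = (3UL << 8) | 0x42;   /* bus 3, devfn 0x42 */
            int busno = (key >> 8) & 0xff;
            int devfn = key & 0xff;

            assert(busno == 3 && devfn == 0x42);
            printf("bus %d devfn %#x\n", busno, devfn);
            return 0;
    }
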
188
189static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
190{
191 struct device_node *np = node;
192 struct pci_dn *pci = NULL;
193 int err = NOTIFY_OK;
194
195 switch (action) {
196 case PSERIES_RECONFIG_ADD:
197 pci = np->parent->data;
198 if (pci)
199 update_dn_pci_info(np, pci->phb);
200 break;
201 default:
202 err = NOTIFY_DONE;
203 break;
204 }
205 return err;
206}
207
208static struct notifier_block pci_dn_reconfig_nb = {
209 .notifier_call = pci_dn_reconfig_notifier,
210};
211
212/**
213 * pci_devs_phb_init - Initialize phbs and pci devs under them.
214 *
215 * This routine walks over all phb's (pci-host bridges) on the
216 * system, and sets up assorted pci-related structures
217 * (including pci info in the device node structs) for each
218 * pci device found underneath. This routine runs once,
219 * early in the boot sequence.
220 */
221void __init pci_devs_phb_init(void)
222{
223 struct pci_controller *phb, *tmp;
224
225 /* This must be done first so the device nodes have valid pci info! */
226 list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
227 pci_devs_phb_init_dynamic(phb);
228
229 pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
230}
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
deleted file mode 100644
index bdf15dbbf4f0..000000000000
--- a/arch/ppc64/kernel/pci_iommu.c
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * arch/ppc64/kernel/pci_iommu.c
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 *
5 * Rewrite, cleanup, new allocation schemes:
6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
7 *
8 * Dynamic DMA mapping support, platform-independent parts.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/slab.h>
29#include <linux/mm.h>
30#include <linux/spinlock.h>
31#include <linux/string.h>
32#include <linux/pci.h>
33#include <linux/dma-mapping.h>
34#include <asm/io.h>
35#include <asm/prom.h>
36#include <asm/iommu.h>
37#include <asm/pci-bridge.h>
38#include <asm/machdep.h>
39#include <asm/ppc-pci.h>
40
41/*
42 * We can use ->sysdata directly and avoid the extra work in
43 * pci_device_to_OF_node since ->sysdata will have been initialised
44 * in the iommu init code for all devices.
45 */
46#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
47
48static inline struct iommu_table *devnode_table(struct device *dev)
49{
50 struct pci_dev *pdev;
51
52 if (!dev) {
53 pdev = ppc64_isabridge_dev;
54 if (!pdev)
55 return NULL;
56 } else
57 pdev = to_pci_dev(dev);
58
59 return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
60}
61
62
63/* Allocates a contiguous real buffer and creates mappings over it.
64 * Returns the virtual address of the buffer and sets dma_handle
65 * to the dma address (mapping) of the first page.
66 */
67static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
68 dma_addr_t *dma_handle, gfp_t flag)
69{
70 return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
71 flag);
72}
73
74static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
75 void *vaddr, dma_addr_t dma_handle)
76{
77 iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
78}
79
80/* Creates TCEs for a user provided buffer. The user buffer must be
81 * contiguous real kernel storage (not vmalloc). The address of the buffer
82 * passed here is the kernel (virtual) address of the buffer. The buffer
83 * need not be page aligned, the dma_addr_t returned will point to the same
84 * byte within the page as vaddr.
85 */
86static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
87 size_t size, enum dma_data_direction direction)
88{
89 return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
90}
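
As the comment before pci_iommu_map_single() says, the returned bus address points to the same byte within the page as vaddr: the IOMMU maps whole pages and the low bits pass through. A sketch of that address math, with a hypothetical IOMMU window base and TCE slot number:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    /* hypothetical: entry is the TCE slot the page was mapped into */
    static uint64_t dma_addr(uint64_t iommu_base, unsigned long entry,
                             unsigned long vaddr)
    {
            return iommu_base + ((uint64_t)entry << PAGE_SHIFT)
                              + (vaddr & ~PAGE_MASK);   /* keep in-page offset */
    }

    int main(void)
    {
            printf("0x%llx\n",
                   (unsigned long long)dma_addr(0x80000000ULL, 5, 0xc0001234UL));
            /* -> 0x80005234: slot 5, offset 0x234 */
            return 0;
    }
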
91
92
93static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
94 size_t size, enum dma_data_direction direction)
95{
96 iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
97}
98
99
100static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
101 int nelems, enum dma_data_direction direction)
102{
103 return iommu_map_sg(pdev, devnode_table(pdev), sglist,
104 nelems, direction);
105}
106
107static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
108 int nelems, enum dma_data_direction direction)
109{
110 iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
111}
112
113/* We support DMA to/from any memory page via the iommu */
114static int pci_iommu_dma_supported(struct device *dev, u64 mask)
115{
116 return 1;
117}
118
119void pci_iommu_init(void)
120{
121 pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
122 pci_dma_ops.free_coherent = pci_iommu_free_coherent;
123 pci_dma_ops.map_single = pci_iommu_map_single;
124 pci_dma_ops.unmap_single = pci_iommu_unmap_single;
125 pci_dma_ops.map_sg = pci_iommu_map_sg;
126 pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
127 pci_dma_ops.dma_supported = pci_iommu_dma_supported;
128}
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
deleted file mode 100644
index 84006e26342c..000000000000
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/config.h>
10#include <linux/module.h>
11#include <linux/string.h>
12#include <linux/console.h>
13#include <net/checksum.h>
14
15#include <asm/processor.h>
16#include <asm/uaccess.h>
17#include <asm/io.h>
18#include <asm/system.h>
19#include <asm/hw_irq.h>
20#include <asm/abs_addr.h>
21#include <asm/cacheflush.h>
22
23EXPORT_SYMBOL(strcpy);
24EXPORT_SYMBOL(strncpy);
25EXPORT_SYMBOL(strcat);
26EXPORT_SYMBOL(strncat);
27EXPORT_SYMBOL(strchr);
28EXPORT_SYMBOL(strrchr);
29EXPORT_SYMBOL(strpbrk);
30EXPORT_SYMBOL(strstr);
31EXPORT_SYMBOL(strlen);
32EXPORT_SYMBOL(strnlen);
33EXPORT_SYMBOL(strcmp);
34EXPORT_SYMBOL(strncmp);
35
36EXPORT_SYMBOL(csum_partial);
37EXPORT_SYMBOL(csum_partial_copy_generic);
38EXPORT_SYMBOL(ip_fast_csum);
39EXPORT_SYMBOL(csum_tcpudp_magic);
40
41EXPORT_SYMBOL(__copy_tofrom_user);
42EXPORT_SYMBOL(__clear_user);
43EXPORT_SYMBOL(__strncpy_from_user);
44EXPORT_SYMBOL(__strnlen_user);
45
46EXPORT_SYMBOL(reloc_offset);
47
48EXPORT_SYMBOL(_insb);
49EXPORT_SYMBOL(_outsb);
50EXPORT_SYMBOL(_insw);
51EXPORT_SYMBOL(_outsw);
52EXPORT_SYMBOL(_insl);
53EXPORT_SYMBOL(_outsl);
54EXPORT_SYMBOL(_insw_ns);
55EXPORT_SYMBOL(_outsw_ns);
56EXPORT_SYMBOL(_insl_ns);
57EXPORT_SYMBOL(_outsl_ns);
58
59EXPORT_SYMBOL(kernel_thread);
60
61EXPORT_SYMBOL(giveup_fpu);
62#ifdef CONFIG_ALTIVEC
63EXPORT_SYMBOL(giveup_altivec);
64#endif
65EXPORT_SYMBOL(__flush_icache_range);
66EXPORT_SYMBOL(flush_dcache_range);
67
68EXPORT_SYMBOL(memcpy);
69EXPORT_SYMBOL(memset);
70EXPORT_SYMBOL(memmove);
71EXPORT_SYMBOL(memscan);
72EXPORT_SYMBOL(memcmp);
73EXPORT_SYMBOL(memchr);
74
75EXPORT_SYMBOL(timer_interrupt);
76EXPORT_SYMBOL(console_drivers);
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
deleted file mode 100644
index 47cc26e78957..000000000000
--- a/arch/ppc64/kernel/prom.c
+++ /dev/null
@@ -1,1956 +0,0 @@
1/*
2 *
3 *
4 * Procedures for interfacing to Open Firmware.
5 *
6 * Paul Mackerras August 1996.
7 * Copyright (C) 1996 Paul Mackerras.
8 *
9 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
10 * {engebret|bergner}@us.ibm.com
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#undef DEBUG
19
20#include <stdarg.h>
21#include <linux/config.h>
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/init.h>
25#include <linux/threads.h>
26#include <linux/spinlock.h>
27#include <linux/types.h>
28#include <linux/pci.h>
29#include <linux/stringify.h>
30#include <linux/delay.h>
31#include <linux/initrd.h>
32#include <linux/bitops.h>
33#include <linux/module.h>
35
36#include <asm/prom.h>
37#include <asm/rtas.h>
38#include <asm/lmb.h>
39#include <asm/abs_addr.h>
40#include <asm/page.h>
41#include <asm/processor.h>
42#include <asm/irq.h>
43#include <asm/io.h>
44#include <asm/smp.h>
45#include <asm/system.h>
46#include <asm/mmu.h>
47#include <asm/pgtable.h>
48#include <asm/pci.h>
49#include <asm/iommu.h>
50#include <asm/btext.h>
51#include <asm/sections.h>
52#include <asm/machdep.h>
53#include <asm/pSeries_reconfig.h>
54
55#ifdef DEBUG
56#define DBG(fmt...) udbg_printf(fmt)
57#else
58#define DBG(fmt...)
59#endif
60
61struct pci_reg_property {
62 struct pci_address addr;
63 u32 size_hi;
64 u32 size_lo;
65};
66
67struct isa_reg_property {
68 u32 space;
69 u32 address;
70 u32 size;
71};
72
73
74typedef int interpret_func(struct device_node *, unsigned long *,
75 int, int, int);
76
77extern struct rtas_t rtas;
78extern struct lmb lmb;
79extern unsigned long klimit;
80extern unsigned long memory_limit;
81
82static int __initdata dt_root_addr_cells;
83static int __initdata dt_root_size_cells;
84static int __initdata iommu_is_off;
85int __initdata iommu_force_on;
86unsigned long tce_alloc_start, tce_alloc_end;
87
88typedef u32 cell_t;
89
90#if 0
91static struct boot_param_header *initial_boot_params __initdata;
92#else
93struct boot_param_header *initial_boot_params;
94#endif
95
96static struct device_node *allnodes = NULL;
97
98/* use when traversing tree through the allnext, child, sibling,
99 * or parent members of struct device_node.
100 */
101static DEFINE_RWLOCK(devtree_lock);
102
103/* export that to outside world */
104struct device_node *of_chosen;
105
106/*
107 * Wrapper for allocating memory for various data that needs to be
108 * attached to device nodes as they are processed at boot or when
109 * added to the device tree later (e.g. DLPAR). At boot there is
110 * already a region reserved so we just increment *mem_start by size;
111 * otherwise we call kmalloc.
112 */
113static void * prom_alloc(unsigned long size, unsigned long *mem_start)
114{
115 unsigned long tmp;
116
117 if (!mem_start)
118 return kmalloc(size, GFP_KERNEL);
119
120 tmp = *mem_start;
121 *mem_start += size;
122 return (void *)tmp;
123}
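
prom_alloc() is a bump allocator when mem_start is non-NULL (boot time, region already reserved) and plain kmalloc otherwise. The bump half in isolation, as a user-space sketch:

    #include <stdio.h>

    /* carve size bytes off a pre-reserved region by advancing *mem_start */
    static void *bump_alloc(unsigned long size, unsigned long *mem_start)
    {
            unsigned long tmp = *mem_start;

            *mem_start += size;
            return (void *)tmp;
    }

    int main(void)
    {
            static char region[256];
            unsigned long cursor = (unsigned long)region;

            char *a = bump_alloc(16, &cursor);
            char *b = bump_alloc(32, &cursor);
            printf("a=%p b=%p (b-a=%ld)\n", (void *)a, (void *)b, (long)(b - a));
            return 0;
    }
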
124
125/*
126 * Find the device_node with a given phandle.
127 */
128static struct device_node * find_phandle(phandle ph)
129{
130 struct device_node *np;
131
132 for (np = allnodes; np != 0; np = np->allnext)
133 if (np->linux_phandle == ph)
134 return np;
135 return NULL;
136}
137
138/*
139 * Find the interrupt parent of a node.
140 */
141static struct device_node * __devinit intr_parent(struct device_node *p)
142{
143 phandle *parp;
144
145 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
146 if (parp == NULL)
147 return p->parent;
148 return find_phandle(*parp);
149}
150
151/*
152 * Find out the size of each entry of the interrupts property
153 * for a node.
154 */
155int __devinit prom_n_intr_cells(struct device_node *np)
156{
157 struct device_node *p;
158 unsigned int *icp;
159
160 for (p = np; (p = intr_parent(p)) != NULL; ) {
161 icp = (unsigned int *)
162 get_property(p, "#interrupt-cells", NULL);
163 if (icp != NULL)
164 return *icp;
165 if (get_property(p, "interrupt-controller", NULL) != NULL
166 || get_property(p, "interrupt-map", NULL) != NULL) {
167 printk("oops, node %s doesn't have #interrupt-cells\n",
168 p->full_name);
169 return 1;
170 }
171 }
172#ifdef DEBUG_IRQ
173 printk("prom_n_intr_cells failed for %s\n", np->full_name);
174#endif
175 return 1;
176}
177
178/*
179 * Map an interrupt from a device up to the platform interrupt
180 * descriptor.
181 */
182static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
183 struct device_node *np, unsigned int *ints,
184 int nintrc)
185{
186 struct device_node *p, *ipar;
187 unsigned int *imap, *imask, *ip;
188 int i, imaplen, match;
189 int newintrc = 0, newaddrc = 0;
190 unsigned int *reg;
191 int naddrc;
192
193 reg = (unsigned int *) get_property(np, "reg", NULL);
194 naddrc = prom_n_addr_cells(np);
195 p = intr_parent(np);
196 while (p != NULL) {
197 if (get_property(p, "interrupt-controller", NULL) != NULL)
198 /* this node is an interrupt controller, stop here */
199 break;
200 imap = (unsigned int *)
201 get_property(p, "interrupt-map", &imaplen);
202 if (imap == NULL) {
203 p = intr_parent(p);
204 continue;
205 }
206 imask = (unsigned int *)
207 get_property(p, "interrupt-map-mask", NULL);
208 if (imask == NULL) {
209 printk("oops, %s has interrupt-map but no mask\n",
210 p->full_name);
211 return 0;
212 }
213 imaplen /= sizeof(unsigned int);
214 match = 0;
215 ipar = NULL;
216 while (imaplen > 0 && !match) {
217 /* check the child-interrupt field */
218 match = 1;
219 for (i = 0; i < naddrc && match; ++i)
220 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
221 for (; i < naddrc + nintrc && match; ++i)
222 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
223 imap += naddrc + nintrc;
224 imaplen -= naddrc + nintrc;
225 /* grab the interrupt parent */
226 ipar = find_phandle((phandle) *imap++);
227 --imaplen;
228 if (ipar == NULL) {
229 printk("oops, no int parent %x in map of %s\n",
230 imap[-1], p->full_name);
231 return 0;
232 }
233 /* find the parent's # addr and intr cells */
234 ip = (unsigned int *)
235 get_property(ipar, "#interrupt-cells", NULL);
236 if (ip == NULL) {
237 printk("oops, no #interrupt-cells on %s\n",
238 ipar->full_name);
239 return 0;
240 }
241 newintrc = *ip;
242 ip = (unsigned int *)
243 get_property(ipar, "#address-cells", NULL);
244 newaddrc = (ip == NULL)? 0: *ip;
245 imap += newaddrc + newintrc;
246 imaplen -= newaddrc + newintrc;
247 }
248 if (imaplen < 0) {
249 printk("oops, error decoding int-map on %s, len=%d\n",
250 p->full_name, imaplen);
251 return 0;
252 }
253 if (!match) {
254#ifdef DEBUG_IRQ
255 printk("oops, no match in %s int-map for %s\n",
256 p->full_name, np->full_name);
257#endif
258 return 0;
259 }
260 p = ipar;
261 naddrc = newaddrc;
262 nintrc = newintrc;
263 ints = imap - nintrc;
264 reg = ints - naddrc;
265 }
266 if (p == NULL) {
267#ifdef DEBUG_IRQ
268 printk("hmmm, int tree for %s doesn't have ctrler\n",
269 np->full_name);
270#endif
271 return 0;
272 }
273 *irq = ints;
274 *ictrler = p;
275 return nintrc;
276}
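
Each iteration above compares the child's unit address cells plus interrupt specifier cells against one interrupt-map entry under interrupt-map-mask, then chases the entry's parent phandle. The masked comparison on its own, with made-up cell values:

    #include <stdio.h>

    /* match child cells against one interrupt-map entry under a mask */
    static int imap_match(const unsigned int *cells, const unsigned int *imap,
                          const unsigned int *imask, int ncells)
    {
            for (int i = 0; i < ncells; i++)
                    if ((cells[i] ^ imap[i]) & imask[i])
                            return 0;
            return 1;
    }

    int main(void)
    {
            unsigned int cells[] = { 0x8800, 0x0, 0x0, 0x1 };  /* addr + intspec */
            unsigned int entry[] = { 0x8800, 0x0, 0x0, 0x1 };
            unsigned int mask[]  = { 0xf800, 0x0, 0x0, 0x7 };

            printf("match: %d\n", imap_match(cells, entry, mask, 4));
            return 0;
    }
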
277
278static int __devinit finish_node_interrupts(struct device_node *np,
279 unsigned long *mem_start,
280 int measure_only)
281{
282 unsigned int *ints;
283 int intlen, intrcells, intrcount;
284 int i, j, n;
285 unsigned int *irq, virq;
286 struct device_node *ic;
287
288 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
289 if (ints == NULL)
290 return 0;
291 intrcells = prom_n_intr_cells(np);
292 intlen /= intrcells * sizeof(unsigned int);
293
294 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
295 if (!np->intrs)
296 return -ENOMEM;
297
298 if (measure_only)
299 return 0;
300
301 intrcount = 0;
302 for (i = 0; i < intlen; ++i, ints += intrcells) {
303 n = map_interrupt(&irq, &ic, np, ints, intrcells);
304 if (n <= 0)
305 continue;
306
307 /* don't map IRQ numbers under a cascaded 8259 controller */
308 if (ic && device_is_compatible(ic, "chrp,iic")) {
309 np->intrs[intrcount].line = irq[0];
310 } else {
311 virq = virt_irq_create_mapping(irq[0]);
312 if (virq == NO_IRQ) {
313 printk(KERN_CRIT "Could not allocate interrupt"
314 " number for %s\n", np->full_name);
315 continue;
316 }
317 np->intrs[intrcount].line = irq_offset_up(virq);
318 }
319
320 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
321 if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
322 char *name = get_property(ic->parent, "name", NULL);
323 if (name && !strcmp(name, "u3"))
324 np->intrs[intrcount].line += 128;
325 else if (!(name && !strcmp(name, "mac-io")))
326 /* ignore other cascaded controllers, such as
327 the k2-sata-root */
328 break;
329 }
330 np->intrs[intrcount].sense = 1;
331 if (n > 1)
332 np->intrs[intrcount].sense = irq[1];
333 if (n > 2) {
334 printk("hmmm, got %d intr cells for %s:", n,
335 np->full_name);
336 for (j = 0; j < n; ++j)
337 printk(" %d", irq[j]);
338 printk("\n");
339 }
340 ++intrcount;
341 }
342 np->n_intrs = intrcount;
343
344 return 0;
345}
346
347static int __devinit interpret_pci_props(struct device_node *np,
348 unsigned long *mem_start,
349 int naddrc, int nsizec,
350 int measure_only)
351{
352 struct address_range *adr;
353 struct pci_reg_property *pci_addrs;
354 int i, l, n_addrs;
355
356 pci_addrs = (struct pci_reg_property *)
357 get_property(np, "assigned-addresses", &l);
358 if (!pci_addrs)
359 return 0;
360
361 n_addrs = l / sizeof(*pci_addrs);
362
363 adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
364 if (!adr)
365 return -ENOMEM;
366
367 if (measure_only)
368 return 0;
369
370 np->addrs = adr;
371 np->n_addrs = n_addrs;
372
373 for (i = 0; i < n_addrs; i++) {
374 adr[i].space = pci_addrs[i].addr.a_hi;
375 adr[i].address = pci_addrs[i].addr.a_lo |
376 ((u64)pci_addrs[i].addr.a_mid << 32);
377 adr[i].size = pci_addrs[i].size_lo;
378 }
379
380 return 0;
381}
382
383static int __init interpret_dbdma_props(struct device_node *np,
384 unsigned long *mem_start,
385 int naddrc, int nsizec,
386 int measure_only)
387{
388 struct reg_property32 *rp;
389 struct address_range *adr;
390 unsigned long base_address;
391 int i, l;
392 struct device_node *db;
393
394 base_address = 0;
395 if (!measure_only) {
396 for (db = np->parent; db != NULL; db = db->parent) {
397 if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
398 base_address = db->addrs[0].address;
399 break;
400 }
401 }
402 }
403
404 rp = (struct reg_property32 *) get_property(np, "reg", &l);
405 if (rp != 0 && l >= sizeof(struct reg_property32)) {
406 i = 0;
407 adr = (struct address_range *) (*mem_start);
408 while ((l -= sizeof(struct reg_property32)) >= 0) {
409 if (!measure_only) {
410 adr[i].space = 2;
411 adr[i].address = rp[i].address + base_address;
412 adr[i].size = rp[i].size;
413 }
414 ++i;
415 }
416 np->addrs = adr;
417 np->n_addrs = i;
418 (*mem_start) += i * sizeof(struct address_range);
419 }
420
421 return 0;
422}
423
424static int __init interpret_macio_props(struct device_node *np,
425 unsigned long *mem_start,
426 int naddrc, int nsizec,
427 int measure_only)
428{
429 struct reg_property32 *rp;
430 struct address_range *adr;
431 unsigned long base_address;
432 int i, l;
433 struct device_node *db;
434
435 base_address = 0;
436 if (!measure_only) {
437 for (db = np->parent; db != NULL; db = db->parent) {
438 if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
439 base_address = db->addrs[0].address;
440 break;
441 }
442 }
443 }
444
445 rp = (struct reg_property32 *) get_property(np, "reg", &l);
446 if (rp != 0 && l >= sizeof(struct reg_property32)) {
447 i = 0;
448 adr = (struct address_range *) (*mem_start);
449 while ((l -= sizeof(struct reg_property32)) >= 0) {
450 if (!measure_only) {
451 adr[i].space = 2;
452 adr[i].address = rp[i].address + base_address;
453 adr[i].size = rp[i].size;
454 }
455 ++i;
456 }
457 np->addrs = adr;
458 np->n_addrs = i;
459 (*mem_start) += i * sizeof(struct address_range);
460 }
461
462 return 0;
463}
464
465static int __init interpret_isa_props(struct device_node *np,
466 unsigned long *mem_start,
467 int naddrc, int nsizec,
468 int measure_only)
469{
470 struct isa_reg_property *rp;
471 struct address_range *adr;
472 int i, l;
473
474 rp = (struct isa_reg_property *) get_property(np, "reg", &l);
475 if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
476 i = 0;
477 adr = (struct address_range *) (*mem_start);
478 while ((l -= sizeof(struct isa_reg_property)) >= 0) {
479 if (!measure_only) {
480 adr[i].space = rp[i].space;
481 adr[i].address = rp[i].address;
482 adr[i].size = rp[i].size;
483 }
484 ++i;
485 }
486 np->addrs = adr;
487 np->n_addrs = i;
488 (*mem_start) += i * sizeof(struct address_range);
489 }
490
491 return 0;
492}
493
494static int __init interpret_root_props(struct device_node *np,
495 unsigned long *mem_start,
496 int naddrc, int nsizec,
497 int measure_only)
498{
499 struct address_range *adr;
500 int i, l;
501 unsigned int *rp;
502 int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
503
504 rp = (unsigned int *) get_property(np, "reg", &l);
505 if (rp != 0 && l >= rpsize) {
506 i = 0;
507 adr = (struct address_range *) (*mem_start);
508 while ((l -= rpsize) >= 0) {
509 if (!measure_only) {
510 adr[i].space = 0;
511 adr[i].address = rp[naddrc - 1];
512 adr[i].size = rp[naddrc + nsizec - 1];
513 }
514 ++i;
515 rp += naddrc + nsizec;
516 }
517 np->addrs = adr;
518 np->n_addrs = i;
519 (*mem_start) += i * sizeof(struct address_range);
520 }
521
522 return 0;
523}
524
525static int __devinit finish_node(struct device_node *np,
526 unsigned long *mem_start,
527 interpret_func *ifunc,
528 int naddrc, int nsizec,
529 int measure_only)
530{
531 struct device_node *child;
532 int *ip, rc = 0;
533
534 /* get the device addresses and interrupts */
535 if (ifunc != NULL)
536 rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
537 if (rc)
538 goto out;
539
540 rc = finish_node_interrupts(np, mem_start, measure_only);
541 if (rc)
542 goto out;
543
544 /* Look for #address-cells and #size-cells properties. */
545 ip = (int *) get_property(np, "#address-cells", NULL);
546 if (ip != NULL)
547 naddrc = *ip;
548 ip = (int *) get_property(np, "#size-cells", NULL);
549 if (ip != NULL)
550 nsizec = *ip;
551
552 if (!strcmp(np->name, "device-tree") || np->parent == NULL)
553 ifunc = interpret_root_props;
554 else if (np->type == 0)
555 ifunc = NULL;
556 else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
557 ifunc = interpret_pci_props;
558 else if (!strcmp(np->type, "dbdma"))
559 ifunc = interpret_dbdma_props;
560 else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
561 ifunc = interpret_macio_props;
562 else if (!strcmp(np->type, "isa"))
563 ifunc = interpret_isa_props;
564 else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
565 ifunc = interpret_root_props;
566 else if (!((ifunc == interpret_dbdma_props
567 || ifunc == interpret_macio_props)
568 && (!strcmp(np->type, "escc")
569 || !strcmp(np->type, "media-bay"))))
570 ifunc = NULL;
571
572 for (child = np->child; child != NULL; child = child->sibling) {
573 rc = finish_node(child, mem_start, ifunc,
574 naddrc, nsizec, measure_only);
575 if (rc)
576 goto out;
577 }
578out:
579 return rc;
580}
581
582/**
583 * finish_device_tree is called once things are running normally
584 * (i.e. with text and data mapped to the address they were linked at).
585 * It traverses the device tree and fills in some of the additional
586 * fields in each node, such as {n_}addrs and {n_}intrs. The virtual
587 * interrupt mapping is also initialized at this point.
588 */
589void __init finish_device_tree(void)
590{
591 unsigned long start, end, size = 0;
592
593 DBG(" -> finish_device_tree\n");
594
595 if (ppc64_interrupt_controller == IC_INVALID) {
596 DBG("failed to configure interrupt controller type\n");
597 panic("failed to configure interrupt controller type\n");
598 }
599
600 /* Initialize virtual IRQ map */
601 virt_irq_init();
602
603 /*
604 * Finish device-tree (pre-parsing some properties etc...)
605 * We do this in two passes: one with "measure_only" set, which
606 * only measures the amount of memory needed; then we allocate
607 * that memory and call finish_node again. However, we must be
608 * careful, as most routines will fail nowadays when prom_alloc()
609 * returns 0, so we must make sure our first pass doesn't start
610 * at 0. We pre-initialize size to 16 for that reason and then
611 * subtract those additional 16 bytes afterwards
612 */
613 size = 16;
614 finish_node(allnodes, &size, NULL, 0, 0, 1);
615 size -= 16;
616 end = start = (unsigned long)abs_to_virt(lmb_alloc(size, 128));
617 finish_node(allnodes, &end, NULL, 0, 0, 0);
618 BUG_ON(end != start + size);
619
620 DBG(" <- finish_device_tree\n");
621}
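/*
 * [Editorial sketch, not part of the original file: the measure-then-fill
 * idiom used by finish_device_tree() above, reduced to a standalone
 * userspace example. All names below are hypothetical.]
 */
#include <stdlib.h>
#include <string.h>

/* Pass 1 (measure_only) only advances the cursor; pass 2 really copies. */
static unsigned long emit(char *base, unsigned long pos,
			  const char *s, int measure_only)
{
	unsigned long len = strlen(s) + 1;

	if (!measure_only)
		memcpy(base + pos, s, len);
	return pos + len;
}

static char *pack_strings(const char **v, int n)
{
	unsigned long size = 0;
	char *buf;
	int i;

	for (i = 0; i < n; i++)			/* pass 1: measure */
		size = emit(NULL, size, v[i], 1);
	buf = malloc(size);			/* allocate the exact amount */
	if (buf != NULL) {
		size = 0;
		for (i = 0; i < n; i++)		/* pass 2: fill for real */
			size = emit(buf, size, v[i], 0);
	}
	return buf;
}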
622
623#ifdef DEBUG
624#define printk udbg_printf
625#endif
626
627static inline char *find_flat_dt_string(u32 offset)
628{
629 return ((char *)initial_boot_params) +
630 initial_boot_params->off_dt_strings + offset;
631}
632
633/**
634 * This function is used to scan the flattened device tree; it is
635 * used, for example, to extract the memory information at boot
636 * before we can unflatten the tree
637 */
638int __init of_scan_flat_dt(int (*it)(unsigned long node,
639 const char *uname, int depth,
640 void *data),
641 void *data)
642{
643 unsigned long p = ((unsigned long)initial_boot_params) +
644 initial_boot_params->off_dt_struct;
645 int rc = 0;
646 int depth = -1;
647
648 do {
649 u32 tag = *((u32 *)p);
650 char *pathp;
651
652 p += 4;
653 if (tag == OF_DT_END_NODE) {
654 depth --;
655 continue;
656 }
657 if (tag == OF_DT_NOP)
658 continue;
659 if (tag == OF_DT_END)
660 break;
661 if (tag == OF_DT_PROP) {
662 u32 sz = *((u32 *)p);
663 p += 8;
664 if (initial_boot_params->version < 0x10)
665 p = _ALIGN(p, sz >= 8 ? 8 : 4);
666 p += sz;
667 p = _ALIGN(p, 4);
668 continue;
669 }
670 if (tag != OF_DT_BEGIN_NODE) {
671 printk(KERN_WARNING "Invalid tag %x scanning flattened"
672 " device tree !\n", tag);
673 return -EINVAL;
674 }
675 depth++;
676 pathp = (char *)p;
677 p = _ALIGN(p + strlen(pathp) + 1, 4);
678 if ((*pathp) == '/') {
679 char *lp, *np;
680 for (lp = NULL, np = pathp; *np; np++)
681 if ((*np) == '/')
682 lp = np+1;
683 if (lp != NULL)
684 pathp = lp;
685 }
686 rc = it(p, pathp, depth, data);
687 if (rc != 0)
688 break;
689 } while(1);
690
691 return rc;
692}
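/*
 * [Editorial sketch, not part of the original file: how a caller would use
 * of_scan_flat_dt() above. A non-zero return from the callback stops the
 * scan; the node counting and the "chosen" match are only illustrative.]
 */
static int __init demo_count_cb(unsigned long node, const char *uname,
				int depth, void *data)
{
	int *count = data;

	(*count)++;			/* count every node visited */
	if (depth == 1 && strcmp(uname, "chosen") == 0)
		return 1;		/* non-zero return: stop the scan */
	return 0;			/* zero: keep scanning */
}

/* Usage: int n = 0; of_scan_flat_dt(demo_count_cb, &n); */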
693
694/**
695 * This function can be used within an of_scan_flat_dt() callback to
696 * get access to properties
697 */
698void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
699 unsigned long *size)
700{
701 unsigned long p = node;
702
703 do {
704 u32 tag = *((u32 *)p);
705 u32 sz, noff;
706 const char *nstr;
707
708 p += 4;
709 if (tag == OF_DT_NOP)
710 continue;
711 if (tag != OF_DT_PROP)
712 return NULL;
713
714 sz = *((u32 *)p);
715 noff = *((u32 *)(p + 4));
716 p += 8;
717 if (initial_boot_params->version < 0x10)
718 p = _ALIGN(p, sz >= 8 ? 8 : 4);
719
720 nstr = find_flat_dt_string(noff);
721 if (nstr == NULL) {
722 printk(KERN_WARNING "Can't find property index"
723 " name !\n");
724 return NULL;
725 }
726 if (strcmp(name, nstr) == 0) {
727 if (size)
728 *size = sz;
729 return (void *)p;
730 }
731 p += sz;
732 p = _ALIGN(p, 4);
733 } while(1);
734}
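/*
 * [Editorial sketch, not part of the original file: reading a property from
 * inside an of_scan_flat_dt() callback, as the comment above describes.
 * The "model" property name is only an example.]
 */
static int __init demo_model_cb(unsigned long node, const char *uname,
				int depth, void *data)
{
	unsigned long len;
	char *model = of_get_flat_dt_prop(node, "model", &len);

	if (model != NULL)
		DBG("%s: model = %s (%lu bytes)\n", uname, model, len);
	return 0;			/* keep scanning */
}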
735
736static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
737 unsigned long align)
738{
739 void *res;
740
741 *mem = _ALIGN(*mem, align);
742 res = (void *)*mem;
743 *mem += size;
744
745 return res;
746}
747
748static unsigned long __init unflatten_dt_node(unsigned long mem,
749 unsigned long *p,
750 struct device_node *dad,
751 struct device_node ***allnextpp,
752 unsigned long fpsize)
753{
754 struct device_node *np;
755 struct property *pp, **prev_pp = NULL;
756 char *pathp;
757 u32 tag;
758 unsigned int l, allocl;
759 int has_name = 0;
760 int new_format = 0;
761
762 tag = *((u32 *)(*p));
763 if (tag != OF_DT_BEGIN_NODE) {
764 printk("Weird tag at start of node: %x\n", tag);
765 return mem;
766 }
767 *p += 4;
768 pathp = (char *)*p;
769 l = allocl = strlen(pathp) + 1;
770 *p = _ALIGN(*p + l, 4);
771
772 /* version 0x10 has a more compact unit name here instead of the full
773 * path. We accumulate the full path size using "fpsize" and rebuild
774 * it later. We detect this because the first character of the name
775 * is not '/'.
776 */
777 if ((*pathp) != '/') {
778 new_format = 1;
779 if (fpsize == 0) {
780 /* root node: special case. fpsize accounts for path
781 * plus terminating zero. root node only has '/', so
782 * fpsize should be 2, but we want to avoid first-
783 * level nodes having two '/', so we use fpsize 1 here
784 */
785 fpsize = 1;
786 allocl = 2;
787 } else {
788 /* account for '/' and path size minus terminal 0
789 * already in 'l'
790 */
791 fpsize += l;
792 allocl = fpsize;
793 }
794 }
795
796
797 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
798 __alignof__(struct device_node));
799 if (allnextpp) {
800 memset(np, 0, sizeof(*np));
801 np->full_name = ((char*)np) + sizeof(struct device_node);
802 if (new_format) {
803 char *p = np->full_name;
804 /* rebuild full path for new format */
805 if (dad && dad->parent) {
806 strcpy(p, dad->full_name);
807#ifdef DEBUG
808 if ((strlen(p) + l + 1) != allocl) {
809 DBG("%s: p: %d, l: %d, a: %d\n",
810 pathp, strlen(p), l, allocl);
811 }
812#endif
813 p += strlen(p);
814 }
815 *(p++) = '/';
816 memcpy(p, pathp, l);
817 } else
818 memcpy(np->full_name, pathp, l);
819 prev_pp = &np->properties;
820 **allnextpp = np;
821 *allnextpp = &np->allnext;
822 if (dad != NULL) {
823 np->parent = dad;
824 /* we temporarily use the next field as `last_child'*/
825 if (dad->next == 0)
826 dad->child = np;
827 else
828 dad->next->sibling = np;
829 dad->next = np;
830 }
831 kref_init(&np->kref);
832 }
833 while(1) {
834 u32 sz, noff;
835 char *pname;
836
837 tag = *((u32 *)(*p));
838 if (tag == OF_DT_NOP) {
839 *p += 4;
840 continue;
841 }
842 if (tag != OF_DT_PROP)
843 break;
844 *p += 4;
845 sz = *((u32 *)(*p));
846 noff = *((u32 *)((*p) + 4));
847 *p += 8;
848 if (initial_boot_params->version < 0x10)
849 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
850
851 pname = find_flat_dt_string(noff);
852 if (pname == NULL) {
853 printk("Can't find property name in list !\n");
854 break;
855 }
856 if (strcmp(pname, "name") == 0)
857 has_name = 1;
858 l = strlen(pname) + 1;
859 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
860 __alignof__(struct property));
861 if (allnextpp) {
862 if (strcmp(pname, "linux,phandle") == 0) {
863 np->node = *((u32 *)*p);
864 if (np->linux_phandle == 0)
865 np->linux_phandle = np->node;
866 }
867 if (strcmp(pname, "ibm,phandle") == 0)
868 np->linux_phandle = *((u32 *)*p);
869 pp->name = pname;
870 pp->length = sz;
871 pp->value = (void *)*p;
872 *prev_pp = pp;
873 prev_pp = &pp->next;
874 }
875 *p = _ALIGN((*p) + sz, 4);
876 }
877 /* with version 0x10 we may not have the name property, recreate
878 * it here from the unit name if absent
879 */
880 if (!has_name) {
881 char *p = pathp, *ps = pathp, *pa = NULL;
882 int sz;
883
884 while (*p) {
885 if ((*p) == '@')
886 pa = p;
887 if ((*p) == '/')
888 ps = p + 1;
889 p++;
890 }
891 if (pa < ps)
892 pa = p;
893 sz = (pa - ps) + 1;
894 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
895 __alignof__(struct property));
896 if (allnextpp) {
897 pp->name = "name";
898 pp->length = sz;
899 pp->value = (unsigned char *)(pp + 1);
900 *prev_pp = pp;
901 prev_pp = &pp->next;
902 memcpy(pp->value, ps, sz - 1);
903 ((char *)pp->value)[sz - 1] = 0;
904 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
905 }
906 }
907 if (allnextpp) {
908 *prev_pp = NULL;
909 np->name = get_property(np, "name", NULL);
910 np->type = get_property(np, "device_type", NULL);
911
912 if (!np->name)
913 np->name = "<NULL>";
914 if (!np->type)
915 np->type = "<NULL>";
916 }
917 while (tag == OF_DT_BEGIN_NODE) {
918 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
919 tag = *((u32 *)(*p));
920 }
921 if (tag != OF_DT_END_NODE) {
922 printk("Weird tag at end of node: %x\n", tag);
923 return mem;
924 }
925 *p += 4;
926 return mem;
927}
928
929
930/**
931 * unflattens the device-tree passed by the firmware, creating the
932 * tree of struct device_node. It also fills the "name" and "type"
933 * pointers of the nodes so the normal device-tree walking functions
934 * can be used (this used to be done by finish_device_tree)
935 */
936void __init unflatten_device_tree(void)
937{
938 unsigned long start, mem, size;
939 struct device_node **allnextp = &allnodes;
940 char *p = NULL;
941 int l = 0;
942
943 DBG(" -> unflatten_device_tree()\n");
944
945 /* First pass, scan for size */
946 start = ((unsigned long)initial_boot_params) +
947 initial_boot_params->off_dt_struct;
948 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
949 size = (size | 3) + 1;
950
951 DBG(" size is %lx, allocating...\n", size);
952
953 /* Allocate memory for the expanded device tree */
954 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
955 if (!mem) {
956 DBG("Couldn't allocate memory with lmb_alloc()!\n");
957 panic("Couldn't allocate memory with lmb_alloc()!\n");
958 }
959 mem = (unsigned long)abs_to_virt(mem);
960
961 ((u32 *)mem)[size / 4] = 0xdeadbeef;
962
963 DBG(" unflattening at %lx...\n", mem);
964
965 /* Second pass, do actual unflattening */
966 start = ((unsigned long)initial_boot_params) +
967 initial_boot_params->off_dt_struct;
968 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
969 if (*((u32 *)start) != OF_DT_END)
970 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
971 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
972 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
973 ((u32 *)mem)[size / 4] );
974 *allnextp = NULL;
975
976 /* Get pointer to OF "/chosen" node for use everywhere */
977 of_chosen = of_find_node_by_path("/chosen");
978
979 /* Retrieve command line */
980 if (of_chosen != NULL) {
981 p = (char *)get_property(of_chosen, "bootargs", &l);
982 if (p != NULL && l > 0)
983 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
984 }
985#ifdef CONFIG_CMDLINE
986 if (l == 0 || (l == 1 && (*p) == 0))
987 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
988#endif /* CONFIG_CMDLINE */
989
990 DBG("Command line is: %s\n", cmd_line);
991
992 DBG(" <- unflatten_device_tree()\n");
993}
994
995
996static int __init early_init_dt_scan_cpus(unsigned long node,
997 const char *uname, int depth, void *data)
998{
999 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1000 u32 *prop;
1001 unsigned long size;
1002
1003 /* We are scanning "cpu" nodes only */
1004 if (type == NULL || strcmp(type, "cpu") != 0)
1005 return 0;
1006
1007 if (initial_boot_params && initial_boot_params->version >= 2) {
1008 /* version 2 of the kexec param format adds the phys cpuid
1009 * of booted proc.
1010 */
1011 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1012 boot_cpuid = 0;
1013 } else {
1014 /* Check if it's the boot CPU; set its hw index in the paca now */
1015 if (of_get_flat_dt_prop(node, "linux,boot-cpu", NULL)
1016 != NULL) {
1017 u32 *prop = of_get_flat_dt_prop(node, "reg", NULL);
1018 set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
1019 boot_cpuid_phys = get_hard_smp_processor_id(0);
1020 }
1021 }
1022
1023#ifdef CONFIG_ALTIVEC
1024 /* Check if we have a VMX and, if so, update CPU features */
1025 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
1026 if (prop && (*prop) > 0) {
1027 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1028 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1029 }
1030
1031 /* Same goes for Apple's "altivec" property */
1032 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
1033 if (prop) {
1034 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1035 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1036 }
1037#endif /* CONFIG_ALTIVEC */
1038
1039 /*
1040 * Check for an SMT capable CPU and set the CPU feature. We do
1041 * this by looking at the size of the ibm,ppc-interrupt-server#s
1042 * property
1043 */
1044 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1045 &size);
1046 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1047 if (prop && ((size / sizeof(u32)) > 1))
1048 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1049
1050 return 0;
1051}
1052
1053static int __init early_init_dt_scan_chosen(unsigned long node,
1054 const char *uname, int depth, void *data)
1055{
1056 u32 *prop;
1057 u64 *prop64;
1058
1059 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1060
1061 if (depth != 1 || strcmp(uname, "chosen") != 0)
1062 return 0;
1063
1064 /* get platform type */
1065 prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
1066 if (prop == NULL)
1067 return 0;
1068 _machine = *prop;
1069
1070 /* check if iommu is forced on or off */
1071 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1072 iommu_is_off = 1;
1073 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1074 iommu_force_on = 1;
1075
1076 prop64 = (u64*)of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
1077 if (prop64)
1078 memory_limit = *prop64;
1079
1080 prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-start",NULL);
1081 if (prop64)
1082 tce_alloc_start = *prop64;
1083
1084 prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1085 if (prop64)
1086 tce_alloc_end = *prop64;
1087
1088#ifdef CONFIG_PPC_RTAS
1089 /* To help early debugging via the front panel, we retrieve a minimal
1090 * set of RTAS info now if available
1091 */
1092 {
1093 u64 *basep, *entryp;
1094
1095 basep = (u64*)of_get_flat_dt_prop(node,
1096 "linux,rtas-base", NULL);
1097 entryp = (u64*)of_get_flat_dt_prop(node,
1098 "linux,rtas-entry", NULL);
1099 prop = (u32*)of_get_flat_dt_prop(node,
1100 "linux,rtas-size", NULL);
1101 if (basep && entryp && prop) {
1102 rtas.base = *basep;
1103 rtas.entry = *entryp;
1104 rtas.size = *prop;
1105 }
1106 }
1107#endif /* CONFIG_PPC_RTAS */
1108
1109 /* break now */
1110 return 1;
1111}
1112
1113static int __init early_init_dt_scan_root(unsigned long node,
1114 const char *uname, int depth, void *data)
1115{
1116 u32 *prop;
1117
1118 if (depth != 0)
1119 return 0;
1120
1121 prop = (u32 *)of_get_flat_dt_prop(node, "#size-cells", NULL);
1122 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1123 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1124
1125 prop = (u32 *)of_get_flat_dt_prop(node, "#address-cells", NULL);
1126 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1127 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1128
1129 /* break now */
1130 return 1;
1131}
1132
1133static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1134{
1135 cell_t *p = *cellp;
1136 unsigned long r = 0;
1137
1138 /* Ignore more than 2 cells */
1139 while (s > 2) {
1140 p++;
1141 s--;
1142 }
1143 while (s) {
1144 r <<= 32;
1145 r |= *(p++);
1146 s--;
1147 }
1148
1149 *cellp = p;
1150 return r;
1151}
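/*
 * [Editorial note, not part of the original file: a worked example of
 * dt_mem_next_cell(). With two address cells and two size cells, a
 * "reg" entry of { 0x1, 0x00000000, 0x0, 0x80000000 } parses as:
 *
 *	cell_t reg[] = { 0x1, 0x00000000, 0x0, 0x80000000 };
 *	cell_t *p = reg;
 *	base = dt_mem_next_cell(2, &p);	// 0x100000000, p advanced by 2
 *	size = dt_mem_next_cell(2, &p);	// 0x80000000 (2GB)
 *
 * Cells beyond the first two would be skipped by the loop above.]
 */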
1152
1153
1154static int __init early_init_dt_scan_memory(unsigned long node,
1155 const char *uname, int depth, void *data)
1156{
1157 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1158 cell_t *reg, *endp;
1159 unsigned long l;
1160
1161 /* We are scanning "memory" nodes only */
1162 if (type == NULL || strcmp(type, "memory") != 0)
1163 return 0;
1164
1165 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
1166 if (reg == NULL)
1167 return 0;
1168
1169 endp = reg + (l / sizeof(cell_t));
1170
1171 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1172 uname, l, reg[0], reg[1], reg[2], reg[3]);
1173
1174 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1175 unsigned long base, size;
1176
1177 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1178 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1179
1180 if (size == 0)
1181 continue;
1182 DBG(" - %lx , %lx\n", base, size);
1183 if (iommu_is_off) {
1184 if (base >= 0x80000000ul)
1185 continue;
1186 if ((base + size) > 0x80000000ul)
1187 size = 0x80000000ul - base;
1188 }
1189 lmb_add(base, size);
1190 }
1191 return 0;
1192}
1193
1194static void __init early_reserve_mem(void)
1195{
1196 u64 base, size;
1197 u64 *reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
1198 initial_boot_params->off_mem_rsvmap);
1199 while (1) {
1200 base = *(reserve_map++);
1201 size = *(reserve_map++);
1202 if (size == 0)
1203 break;
1204 DBG("reserving: %lx -> %lx\n", base, size);
1205 lmb_reserve(base, size);
1206 }
1207
1208#if 0
1209 DBG("memory reserved, lmbs :\n");
1210 lmb_dump_all();
1211#endif
1212}
1213
1214void __init early_init_devtree(void *params)
1215{
1216 DBG(" -> early_init_devtree()\n");
1217
1218 /* Setup flat device-tree pointer */
1219 initial_boot_params = params;
1220
1221 /* Retrieve various information from the /chosen node of the
1222 * device-tree, including the platform type, initrd location and
1223 * size, TCE reserve, and more ...
1224 */
1225 of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
1226
1227 /* Scan memory nodes and rebuild LMBs */
1228 lmb_init();
1229 of_scan_flat_dt(early_init_dt_scan_root, NULL);
1230 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1231 lmb_enforce_memory_limit(memory_limit);
1232 lmb_analyze();
1233 lmb_reserve(0, __pa(klimit));
1234
1235 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1236 early_reserve_mem();
1237
1238 DBG("Scanning CPUs ...\n");
1239
1240 /* Retrieve the hash table size from the flattened tree plus other
1241 * CPU-related information (altivec support, boot CPU ID, ...)
1242 */
1243 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
1244
1245 DBG(" <- early_init_devtree()\n");
1246}
1247
1248#undef printk
1249
1250int
1251prom_n_addr_cells(struct device_node* np)
1252{
1253 int* ip;
1254 do {
1255 if (np->parent)
1256 np = np->parent;
1257 ip = (int *) get_property(np, "#address-cells", NULL);
1258 if (ip != NULL)
1259 return *ip;
1260 } while (np->parent);
1261 /* No #address-cells property for the root node, default to 1 */
1262 return 1;
1263}
1264EXPORT_SYMBOL_GPL(prom_n_addr_cells);
1265
1266int
1267prom_n_size_cells(struct device_node* np)
1268{
1269 int* ip;
1270 do {
1271 if (np->parent)
1272 np = np->parent;
1273 ip = (int *) get_property(np, "#size-cells", NULL);
1274 if (ip != NULL)
1275 return *ip;
1276 } while (np->parent);
1277 /* No #size-cells property for the root node, default to 1 */
1278 return 1;
1279}
1280EXPORT_SYMBOL_GPL(prom_n_size_cells);
1281
1282/**
1283 * Work out the sense (active-low level / active-high edge)
1284 * of each interrupt from the device tree.
1285 */
1286void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1287{
1288 struct device_node *np;
1289 int i, j;
1290
1291 /* default to level-triggered */
1292 memset(senses, 1, max - off);
1293
1294 for (np = allnodes; np != 0; np = np->allnext) {
1295 for (j = 0; j < np->n_intrs; j++) {
1296 i = np->intrs[j].line;
1297 if (i >= off && i < max)
1298 senses[i-off] = np->intrs[j].sense ?
1299 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
1300 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
1301 }
1302 }
1303}
1304
1305/**
1306 * Construct and return a list of the device_nodes with a given name.
1307 */
1308struct device_node *
1309find_devices(const char *name)
1310{
1311 struct device_node *head, **prevp, *np;
1312
1313 prevp = &head;
1314 for (np = allnodes; np != 0; np = np->allnext) {
1315 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1316 *prevp = np;
1317 prevp = &np->next;
1318 }
1319 }
1320 *prevp = NULL;
1321 return head;
1322}
1323EXPORT_SYMBOL(find_devices);
1324
1325/**
1326 * Construct and return a list of the device_nodes with a given type.
1327 */
1328struct device_node *
1329find_type_devices(const char *type)
1330{
1331 struct device_node *head, **prevp, *np;
1332
1333 prevp = &head;
1334 for (np = allnodes; np != 0; np = np->allnext) {
1335 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1336 *prevp = np;
1337 prevp = &np->next;
1338 }
1339 }
1340 *prevp = NULL;
1341 return head;
1342}
1343EXPORT_SYMBOL(find_type_devices);
1344
1345/**
1346 * Returns all nodes linked together
1347 */
1348struct device_node *
1349find_all_nodes(void)
1350{
1351 struct device_node *head, **prevp, *np;
1352
1353 prevp = &head;
1354 for (np = allnodes; np != 0; np = np->allnext) {
1355 *prevp = np;
1356 prevp = &np->next;
1357 }
1358 *prevp = NULL;
1359 return head;
1360}
1361EXPORT_SYMBOL(find_all_nodes);
1362
1363/** Checks if the given "compat" string matches one of the strings in
1364 * the device's "compatible" property
1365 */
1366int
1367device_is_compatible(struct device_node *device, const char *compat)
1368{
1369 const char* cp;
1370 int cplen, l;
1371
1372 cp = (char *) get_property(device, "compatible", &cplen);
1373 if (cp == NULL)
1374 return 0;
1375 while (cplen > 0) {
1376 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1377 return 1;
1378 l = strlen(cp) + 1;
1379 cp += l;
1380 cplen -= l;
1381 }
1382
1383 return 0;
1384}
1385EXPORT_SYMBOL(device_is_compatible);
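/*
 * [Editorial sketch, not part of the original file: typical use of
 * device_is_compatible(); the device type and compatible string are
 * only examples.]
 *
 *	struct device_node *np = find_type_devices("network");
 *
 *	if (np != NULL && device_is_compatible(np, "ibm,emac"))
 *		// matched one of the tokens in np's "compatible" list
 */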
1386
1387
1388/**
1389 * Indicates whether the root node has a given value in its
1390 * compatible property.
1391 */
1392int
1393machine_is_compatible(const char *compat)
1394{
1395 struct device_node *root;
1396 int rc = 0;
1397
1398 root = of_find_node_by_path("/");
1399 if (root) {
1400 rc = device_is_compatible(root, compat);
1401 of_node_put(root);
1402 }
1403 return rc;
1404}
1405EXPORT_SYMBOL(machine_is_compatible);
1406
1407/**
1408 * Construct and return a list of the device_nodes with a given type
1409 * and compatible property.
1410 */
1411struct device_node *
1412find_compatible_devices(const char *type, const char *compat)
1413{
1414 struct device_node *head, **prevp, *np;
1415
1416 prevp = &head;
1417 for (np = allnodes; np != 0; np = np->allnext) {
1418 if (type != NULL
1419 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1420 continue;
1421 if (device_is_compatible(np, compat)) {
1422 *prevp = np;
1423 prevp = &np->next;
1424 }
1425 }
1426 *prevp = NULL;
1427 return head;
1428}
1429EXPORT_SYMBOL(find_compatible_devices);
1430
1431/**
1432 * Find the device_node with a given full_name.
1433 */
1434struct device_node *
1435find_path_device(const char *path)
1436{
1437 struct device_node *np;
1438
1439 for (np = allnodes; np != 0; np = np->allnext)
1440 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1441 return np;
1442 return NULL;
1443}
1444EXPORT_SYMBOL(find_path_device);
1445
1446/*******
1447 *
1448 * New implementation of the OF "find" APIs: these return a refcounted
1449 * object; call of_node_put() when done. The device tree and list
1450 * are protected by a rw_lock.
1451 *
1452 * Note that property management will need some locking as well,
1453 * this isn't dealt with yet.
1454 *
1455 *******/
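/*
 * [Editorial sketch, not part of the original file: the canonical loop over
 * these refcounted "find" APIs. Passing the previous node back in drops its
 * reference, so of_node_put() is only needed on early exit; the stop
 * condition is hypothetical.]
 */
static void demo_walk_cpus(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_node_by_type(np, "cpu")) != NULL) {
		/* ... use np here ... */
		if (0 /* hypothetical stop condition */) {
			of_node_put(np);	/* early exit: drop our ref */
			break;
		}
	}
}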
1456
1457/**
1458 * of_find_node_by_name - Find a node by its "name" property
1459 * @from: The node to start searching from, or NULL; the node
1460 * you pass will not be searched, only the next one
1461 * will; typically, you pass what the previous call
1462 * returned. of_node_put() will be called on it
1463 * @name: The name string to match against
1464 *
1465 * Returns a node pointer with refcount incremented, use
1466 * of_node_put() on it when done.
1467 */
1468struct device_node *of_find_node_by_name(struct device_node *from,
1469 const char *name)
1470{
1471 struct device_node *np;
1472
1473 read_lock(&devtree_lock);
1474 np = from ? from->allnext : allnodes;
1475 for (; np != 0; np = np->allnext)
1476 if (np->name != 0 && strcasecmp(np->name, name) == 0
1477 && of_node_get(np))
1478 break;
1479 if (from)
1480 of_node_put(from);
1481 read_unlock(&devtree_lock);
1482 return np;
1483}
1484EXPORT_SYMBOL(of_find_node_by_name);
1485
1486/**
1487 * of_find_node_by_type - Find a node by its "device_type" property
1488 * @from: The node to start searching from, or NULL; the node
1489 * you pass will not be searched, only the next one
1490 * will; typically, you pass what the previous call
1491 * returned. of_node_put() will be called on it
1492 * @type: The type string to match against
1493 *
1494 * Returns a node pointer with refcount incremented, use
1495 * of_node_put() on it when done.
1496 */
1497struct device_node *of_find_node_by_type(struct device_node *from,
1498 const char *type)
1499{
1500 struct device_node *np;
1501
1502 read_lock(&devtree_lock);
1503 np = from ? from->allnext : allnodes;
1504 for (; np != 0; np = np->allnext)
1505 if (np->type != 0 && strcasecmp(np->type, type) == 0
1506 && of_node_get(np))
1507 break;
1508 if (from)
1509 of_node_put(from);
1510 read_unlock(&devtree_lock);
1511 return np;
1512}
1513EXPORT_SYMBOL(of_find_node_by_type);
1514
1515/**
1516 * of_find_compatible_node - Find a node based on type and one of the
1517 * tokens in its "compatible" property
1518 * @from: The node to start searching from, or NULL; the node
1519 * you pass will not be searched, only the next one
1520 * will; typically, you pass what the previous call
1521 * returned. of_node_put() will be called on it
1522 * @type: The string to match against "device_type", or NULL to ignore
1523 * @compatible: The string to match to one of the tokens in the device
1524 * "compatible" list.
1525 *
1526 * Returns a node pointer with refcount incremented, use
1527 * of_node_put() on it when done.
1528 */
1529struct device_node *of_find_compatible_node(struct device_node *from,
1530 const char *type, const char *compatible)
1531{
1532 struct device_node *np;
1533
1534 read_lock(&devtree_lock);
1535 np = from ? from->allnext : allnodes;
1536 for (; np != 0; np = np->allnext) {
1537 if (type != NULL
1538 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1539 continue;
1540 if (device_is_compatible(np, compatible) && of_node_get(np))
1541 break;
1542 }
1543 if (from)
1544 of_node_put(from);
1545 read_unlock(&devtree_lock);
1546 return np;
1547}
1548EXPORT_SYMBOL(of_find_compatible_node);
1549
1550/**
1551 * of_find_node_by_path - Find a node matching a full OF path
1552 * @path: The full path to match
1553 *
1554 * Returns a node pointer with refcount incremented, use
1555 * of_node_put() on it when done.
1556 */
1557struct device_node *of_find_node_by_path(const char *path)
1558{
1559 struct device_node *np = allnodes;
1560
1561 read_lock(&devtree_lock);
1562 for (; np != 0; np = np->allnext) {
1563 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1564 && of_node_get(np))
1565 break;
1566 }
1567 read_unlock(&devtree_lock);
1568 return np;
1569}
1570EXPORT_SYMBOL(of_find_node_by_path);
1571
1572/**
1573 * of_find_node_by_phandle - Find a node given a phandle
1574 * @handle: phandle of the node to find
1575 *
1576 * Returns a node pointer with refcount incremented, use
1577 * of_node_put() on it when done.
1578 */
1579struct device_node *of_find_node_by_phandle(phandle handle)
1580{
1581 struct device_node *np;
1582
1583 read_lock(&devtree_lock);
1584 for (np = allnodes; np != 0; np = np->allnext)
1585 if (np->linux_phandle == handle)
1586 break;
1587 if (np)
1588 of_node_get(np);
1589 read_unlock(&devtree_lock);
1590 return np;
1591}
1592EXPORT_SYMBOL(of_find_node_by_phandle);
1593
1594/**
1595 * of_find_all_nodes - Get next node in global list
1596 * @prev: Previous node or NULL to start iteration
1597 * of_node_put() will be called on it
1598 *
1599 * Returns a node pointer with refcount incremented, use
1600 * of_node_put() on it when done.
1601 */
1602struct device_node *of_find_all_nodes(struct device_node *prev)
1603{
1604 struct device_node *np;
1605
1606 read_lock(&devtree_lock);
1607 np = prev ? prev->allnext : allnodes;
1608 for (; np != 0; np = np->allnext)
1609 if (of_node_get(np))
1610 break;
1611 if (prev)
1612 of_node_put(prev);
1613 read_unlock(&devtree_lock);
1614 return np;
1615}
1616EXPORT_SYMBOL(of_find_all_nodes);
1617
1618/**
1619 * of_get_parent - Get a node's parent if any
1620 * @node: Node to get the parent of
1621 *
1622 * Returns a node pointer with refcount incremented, use
1623 * of_node_put() on it when done.
1624 */
1625struct device_node *of_get_parent(const struct device_node *node)
1626{
1627 struct device_node *np;
1628
1629 if (!node)
1630 return NULL;
1631
1632 read_lock(&devtree_lock);
1633 np = of_node_get(node->parent);
1634 read_unlock(&devtree_lock);
1635 return np;
1636}
1637EXPORT_SYMBOL(of_get_parent);
1638
1639/**
1640 * of_get_next_child - Iterate over a node's children
1641 * @node: parent node
1642 * @prev: previous child of the parent node, or NULL to get first
1643 *
1644 * Returns a node pointer with refcount incremented, use
1645 * of_node_put() on it when done.
1646 */
1647struct device_node *of_get_next_child(const struct device_node *node,
1648 struct device_node *prev)
1649{
1650 struct device_node *next;
1651
1652 read_lock(&devtree_lock);
1653 next = prev ? prev->sibling : node->child;
1654 for (; next != 0; next = next->sibling)
1655 if (of_node_get(next))
1656 break;
1657 if (prev)
1658 of_node_put(prev);
1659 read_unlock(&devtree_lock);
1660 return next;
1661}
1662EXPORT_SYMBOL(of_get_next_child);
1663
1664/**
1665 * of_node_get - Increment refcount of a node
1666 * @node: Node to inc refcount, NULL is supported to
1667 * simplify writing of callers
1668 *
1669 * Returns node.
1670 */
1671struct device_node *of_node_get(struct device_node *node)
1672{
1673 if (node)
1674 kref_get(&node->kref);
1675 return node;
1676}
1677EXPORT_SYMBOL(of_node_get);
1678
1679static inline struct device_node * kref_to_device_node(struct kref *kref)
1680{
1681 return container_of(kref, struct device_node, kref);
1682}
1683
1684/**
1685 * of_node_release - release a dynamically allocated node
1686 * @kref: kref element of the node to be released
1687 *
1688 * In of_node_put() this function is passed to kref_put()
1689 * as the destructor.
1690 */
1691static void of_node_release(struct kref *kref)
1692{
1693 struct device_node *node = kref_to_device_node(kref);
1694 struct property *prop = node->properties;
1695
1696 if (!OF_IS_DYNAMIC(node))
1697 return;
1698 while (prop) {
1699 struct property *next = prop->next;
1700 kfree(prop->name);
1701 kfree(prop->value);
1702 kfree(prop);
1703 prop = next;
1704 }
1705 kfree(node->intrs);
1706 kfree(node->addrs);
1707 kfree(node->full_name);
1708 kfree(node->data);
1709 kfree(node);
1710}
1711
1712/**
1713 * of_node_put - Decrement refcount of a node
1714 * @node: Node to dec refcount, NULL is supported to
1715 * simplify writing of callers
1716 *
1717 */
1718void of_node_put(struct device_node *node)
1719{
1720 if (node)
1721 kref_put(&node->kref, of_node_release);
1722}
1723EXPORT_SYMBOL(of_node_put);
1724
1725/*
1726 * Fix up the uninitialized fields in a new device node:
1727 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1728 *
1729 * A lot of boot-time code is duplicated here, because functions such
1730 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1731 * slab allocator.
1732 *
1733 * This should probably be split up into smaller chunks.
1734 */
1735
1736static int of_finish_dynamic_node(struct device_node *node,
1737 unsigned long *unused1, int unused2,
1738 int unused3, int unused4)
1739{
1740 struct device_node *parent = of_get_parent(node);
1741 int err = 0;
1742 phandle *ibm_phandle;
1743
1744 node->name = get_property(node, "name", NULL);
1745 node->type = get_property(node, "device_type", NULL);
1746
1747 if (!parent) {
1748 err = -ENODEV;
1749 goto out;
1750 }
1751
1752 /* We don't support that function on PowerMac, at least
1753 * not yet
1754 */
1755 if (_machine == PLATFORM_POWERMAC) {
1756 err = -ENODEV; goto out; }	/* don't leak the parent reference */
1757
1758 /* fix up new node's linux_phandle field */
1759 if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1760 node->linux_phandle = *ibm_phandle;
1761
1762out:
1763 of_node_put(parent);
1764 return err;
1765}
1766
1767/*
1768 * Plug a device node into the tree and global list.
1769 */
1770void of_attach_node(struct device_node *np)
1771{
1772 write_lock(&devtree_lock);
1773 np->sibling = np->parent->child;
1774 np->allnext = allnodes;
1775 np->parent->child = np;
1776 allnodes = np;
1777 write_unlock(&devtree_lock);
1778}
1779
1780/*
1781 * "Unplug" a node from the device tree. The caller must hold
1782 * a reference to the node. The memory associated with the node
1783 * is not freed until its refcount goes to zero.
1784 */
1785void of_detach_node(const struct device_node *np)
1786{
1787 struct device_node *parent;
1788
1789 write_lock(&devtree_lock);
1790
1791 parent = np->parent;
1792
1793 if (allnodes == np)
1794 allnodes = np->allnext;
1795 else {
1796 struct device_node *prev;
1797 for (prev = allnodes;
1798 prev->allnext != np;
1799 prev = prev->allnext)
1800 ;
1801 prev->allnext = np->allnext;
1802 }
1803
1804 if (parent->child == np)
1805 parent->child = np->sibling;
1806 else {
1807 struct device_node *prevsib;
1808 for (prevsib = np->parent->child;
1809 prevsib->sibling != np;
1810 prevsib = prevsib->sibling)
1811 ;
1812 prevsib->sibling = np->sibling;
1813 }
1814
1815 write_unlock(&devtree_lock);
1816}
1817
1818static int prom_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
1819{
1820 int err;
1821
1822 switch (action) {
1823 case PSERIES_RECONFIG_ADD:
1824 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1825 if (err < 0) {
1826 printk(KERN_ERR "finish_node returned %d\n", err);
1827 err = NOTIFY_BAD;
1828 }
1829 break;
1830 default:
1831 err = NOTIFY_DONE;
1832 break;
1833 }
1834 return err;
1835}
1836
1837static struct notifier_block prom_reconfig_nb = {
1838 .notifier_call = prom_reconfig_notifier,
1839 .priority = 10, /* This one needs to run first */
1840};
1841
1842static int __init prom_reconfig_setup(void)
1843{
1844 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1845}
1846__initcall(prom_reconfig_setup);
1847
1848/*
1849 * Find a property with a given name for a given node
1850 * and return the value.
1851 */
1852unsigned char *
1853get_property(struct device_node *np, const char *name, int *lenp)
1854{
1855 struct property *pp;
1856
1857 for (pp = np->properties; pp != 0; pp = pp->next)
1858 if (strcmp(pp->name, name) == 0) {
1859 if (lenp != 0)
1860 *lenp = pp->length;
1861 return pp->value;
1862 }
1863 return NULL;
1864}
1865EXPORT_SYMBOL(get_property);
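/*
 * [Editorial sketch, not part of the original file: typical get_property()
 * use; the property name and the length check are only illustrative.]
 *
 *	int len;
 *	u32 *freq = (u32 *)get_property(np, "clock-frequency", &len);
 *
 *	if (freq != NULL && len >= sizeof(u32))
 *		// *freq holds the clock frequency
 */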
1866
1867/*
1868 * Add a property to a node.
1869 */
1870int
1871prom_add_property(struct device_node* np, struct property* prop)
1872{
1873 struct property **next;
1874
1875 prop->next = NULL;
1876 write_lock(&devtree_lock);
1877 next = &np->properties;
1878 while (*next) {
1879 if (strcmp(prop->name, (*next)->name) == 0) {
1880 /* duplicate ! don't insert it */
1881 write_unlock(&devtree_lock);
1882 return -1;
1883 }
1884 next = &(*next)->next;
1885 }
1886 *next = prop;
1887 write_unlock(&devtree_lock);
1888
1889 /* try to add to proc as well if it was initialized */
1890 if (np->pde)
1891 proc_device_tree_add_prop(np->pde, prop);
1892
1893 return 0;
1894}
1895
1896#if 0
1897void
1898print_properties(struct device_node *np)
1899{
1900 struct property *pp;
1901 char *cp;
1902 int i, n;
1903
1904 for (pp = np->properties; pp != 0; pp = pp->next) {
1905 printk(KERN_INFO "%s", pp->name);
1906 for (i = strlen(pp->name); i < 16; ++i)
1907 printk(" ");
1908 cp = (char *) pp->value;
1909 for (i = pp->length; i > 0; --i, ++cp)
1910 if ((i > 1 && (*cp < 0x20 || *cp > 0x7e))
1911 || (i == 1 && *cp != 0))
1912 break;
1913 if (i == 0 && pp->length > 1) {
1914 /* looks like a string */
1915 printk(" %s\n", (char *) pp->value);
1916 } else {
1917 /* dump it in hex */
1918 n = pp->length;
1919 if (n > 64)
1920 n = 64;
1921 if (pp->length % 4 == 0) {
1922 unsigned int *p = (unsigned int *) pp->value;
1923
1924 n /= 4;
1925 for (i = 0; i < n; ++i) {
1926 if (i != 0 && (i % 4) == 0)
1927 printk("\n ");
1928 printk(" %08x", *p++);
1929 }
1930 } else {
1931 unsigned char *bp = pp->value;
1932
1933 for (i = 0; i < n; ++i) {
1934 if (i != 0 && (i % 16) == 0)
1935 printk("\n ");
1936 printk(" %02x", *bp++);
1937 }
1938 }
1939 printk("\n");
1940 if (pp->length > 64)
1941 printk(" ... (length = %d)\n",
1942 pp->length);
1943 }
1944 }
1945}
1946#endif
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
deleted file mode 100644
index 6375f40b23db..000000000000
--- a/arch/ppc64/kernel/prom_init.c
+++ /dev/null
@@ -1,2051 +0,0 @@
1/*
2 *
3 *
4 * Procedures for interfacing to Open Firmware.
5 *
6 * Paul Mackerras August 1996.
7 * Copyright (C) 1996 Paul Mackerras.
8 *
9 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
10 * {engebret|bergner}@us.ibm.com
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#undef DEBUG_PROM
19
20#include <stdarg.h>
21#include <linux/config.h>
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/init.h>
25#include <linux/threads.h>
26#include <linux/spinlock.h>
27#include <linux/types.h>
28#include <linux/pci.h>
29#include <linux/proc_fs.h>
30#include <linux/stringify.h>
31#include <linux/delay.h>
32#include <linux/initrd.h>
33#include <linux/bitops.h>
34#include <asm/prom.h>
35#include <asm/rtas.h>
36#include <asm/abs_addr.h>
37#include <asm/page.h>
38#include <asm/processor.h>
39#include <asm/irq.h>
40#include <asm/io.h>
41#include <asm/smp.h>
42#include <asm/system.h>
43#include <asm/mmu.h>
44#include <asm/pgtable.h>
45#include <asm/pci.h>
46#include <asm/iommu.h>
47#include <asm/btext.h>
48#include <asm/sections.h>
49#include <asm/machdep.h>
50
51#ifdef CONFIG_LOGO_LINUX_CLUT224
52#include <linux/linux_logo.h>
53extern const struct linux_logo logo_linux_clut224;
54#endif
55
56/*
57 * Properties whose value is longer than this get excluded from our
58 * copy of the device tree. This value does need to be big enough to
59 * ensure that we don't lose things like the interrupt-map property
60 * on a PCI-PCI bridge.
61 */
62#define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
63
64/*
65 * Eventually bump that one up
66 */
67#define DEVTREE_CHUNK_SIZE 0x100000
68
69/*
70 * This is the size of the local memory reserve map that gets copied
71 * into the boot params passed to the kernel. That size is totally
72 * flexible as the kernel just reads the list until it encounters an
73 * entry with size 0, so it can be changed without breaking binary
74 * compatibility
75 */
76#define MEM_RESERVE_MAP_SIZE 8
77
78/*
79 * prom_init() is called very early on, before the kernel text
80 * and data have been mapped to KERNELBASE. At this point the code
81 * is running at whatever address it has been loaded at, so
82 * references to extern and static variables must be relocated
83 * explicitly. The procedure reloc_offset() returns the address
84 * we're currently running at minus the address we were linked at.
85 * (Note that strings count as static variables.)
86 *
87 * Because OF may have mapped I/O devices into the area starting at
88 * KERNELBASE, particularly on CHRP machines, we can't safely call
89 * OF once the kernel has been mapped to KERNELBASE. Therefore all
90 * OF calls should be done within prom_init(), and prom_init()
91 * and all routines called within it must be careful to relocate
92 * references as necessary.
93 *
94 * Note that the bss is cleared *after* prom_init runs, so we have
95 * to make sure that any static or extern variables it accesses
96 * are put in the data segment.
97 */
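/*
 * [Editorial sketch, not part of the original file: what the relocation
 * rules above look like in practice. reloc_offset(), RELOC() and
 * PTRRELOC() are the helpers this file really uses; the flag variable
 * is hypothetical. Note the __initdata placement keeps it out of bss,
 * as the comment above requires.]
 *
 *	static unsigned long __initdata my_flag;
 *
 *	unsigned long offset = reloc_offset();
 *
 *	RELOC(my_flag) = 1;			// not: my_flag = 1;
 *	prom_printf("flag: %x\n", RELOC(my_flag));
 */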
98
99
100#define PROM_BUG() do { \
101 prom_printf("kernel BUG at %s line 0x%x!\n", \
102 RELOC(__FILE__), __LINE__); \
103 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
104} while (0)
105
106#ifdef DEBUG_PROM
107#define prom_debug(x...) prom_printf(x)
108#else
109#define prom_debug(x...)
110#endif
111
112
113typedef u32 prom_arg_t;
114
115struct prom_args {
116 u32 service;
117 u32 nargs;
118 u32 nret;
119 prom_arg_t args[10];
120 prom_arg_t *rets; /* Pointer to return values in args[nargs]. */
121};
122
123struct prom_t {
124 unsigned long entry;
125 ihandle root;
126 ihandle chosen;
127 int cpu;
128 ihandle stdout;
129 ihandle disp_node;
130 struct prom_args args;
131 unsigned long version;
132 unsigned long root_size_cells;
133 unsigned long root_addr_cells;
134};
135
136struct pci_reg_property {
137 struct pci_address addr;
138 u32 size_hi;
139 u32 size_lo;
140};
141
142struct mem_map_entry {
143 u64 base;
144 u64 size;
145};
146
147typedef u32 cell_t;
148
149extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
150
151extern void enter_prom(struct prom_args *args, unsigned long entry);
152extern void copy_and_flush(unsigned long dest, unsigned long src,
153 unsigned long size, unsigned long offset);
154
155extern unsigned long klimit;
156
157/* prom structure */
158static struct prom_t __initdata prom;
159
160#define PROM_SCRATCH_SIZE 256
161
162static char __initdata of_stdout_device[256];
163static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
164
165static unsigned long __initdata dt_header_start;
166static unsigned long __initdata dt_struct_start, dt_struct_end;
167static unsigned long __initdata dt_string_start, dt_string_end;
168
169static unsigned long __initdata prom_initrd_start, prom_initrd_end;
170
171static int __initdata iommu_force_on;
172static int __initdata ppc64_iommu_off;
173static int __initdata of_platform;
174
175static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
176
177static unsigned long __initdata prom_memory_limit;
178static unsigned long __initdata prom_tce_alloc_start;
179static unsigned long __initdata prom_tce_alloc_end;
180
181static unsigned long __initdata alloc_top;
182static unsigned long __initdata alloc_top_high;
183static unsigned long __initdata alloc_bottom;
184static unsigned long __initdata rmo_top;
185static unsigned long __initdata ram_top;
186
187static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
188static int __initdata mem_reserve_cnt;
189
190static cell_t __initdata regbuf[1024];
191
192
193#define MAX_CPU_THREADS 2
194
195/* TO GO */
196#ifdef CONFIG_HMT
197struct {
198 unsigned int pir;
199 unsigned int threadid;
200} hmt_thread_data[NR_CPUS];
201#endif /* CONFIG_HMT */
202
203/*
204 * These are used in calls to call_prom. The 4th and following
205 * arguments to call_prom should be 32-bit values. 64-bit values
206 * are truncated to 32 bits (and fortunately don't get interpreted
207 * as two arguments).
208 */
209#define ADDR(x) (u32) ((unsigned long)(x) - offset)
210
211/*
212 * Error results ... some OF calls will return "-1" on error, some
213 * will return 0, some will return either. To simplify, here are
214 * macros to use with any ihandle or phandle return value to check if
215 * it is valid
216 */
217
218#define PROM_ERROR (-1u)
219#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
220#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
221
222
223/* This is the one and *ONLY* place where we actually call open
224 * firmware from, since we need to make sure we're running in 32b
225 * mode when we do. We switch back to 64b mode upon return.
226 */
227
228static int __init call_prom(const char *service, int nargs, int nret, ...)
229{
230 int i;
231 unsigned long offset = reloc_offset();
232 struct prom_t *_prom = PTRRELOC(&prom);
233 va_list list;
234
235 _prom->args.service = ADDR(service);
236 _prom->args.nargs = nargs;
237 _prom->args.nret = nret;
238 _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
239
240 va_start(list, nret);
241 for (i=0; i < nargs; i++)
242 _prom->args.args[i] = va_arg(list, prom_arg_t);
243 va_end(list);
244
245 for (i=0; i < nret ;i++)
246 _prom->args.rets[i] = 0;
247
248 enter_prom(&_prom->args, _prom->entry);
249
250 return (nret > 0) ? _prom->args.rets[0] : 0;
251}
252
253
254static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
255 unsigned long align)
256{
257 return (unsigned int)call_prom("claim", 3, 1,
258 (prom_arg_t)virt, (prom_arg_t)size,
259 (prom_arg_t)align);
260}
261
262static void __init prom_print(const char *msg)
263{
264 const char *p, *q;
265 unsigned long offset = reloc_offset();
266 struct prom_t *_prom = PTRRELOC(&prom);
267
268 if (_prom->stdout == 0)
269 return;
270
271 for (p = msg; *p != 0; p = q) {
272 for (q = p; *q != 0 && *q != '\n'; ++q)
273 ;
274 if (q > p)
275 call_prom("write", 3, 1, _prom->stdout, p, q - p);
276 if (*q == 0)
277 break;
278 ++q;
279 call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
280 }
281}
282
283
284static void __init prom_print_hex(unsigned long val)
285{
286 unsigned long offset = reloc_offset();
287 int i, nibbles = sizeof(val)*2;
288 char buf[sizeof(val)*2+1];
289 struct prom_t *_prom = PTRRELOC(&prom);
290
291 for (i = nibbles-1; i >= 0; i--) {
292 buf[i] = (val & 0xf) + '0';
293 if (buf[i] > '9')
294 buf[i] += ('a'-'0'-10);
295 val >>= 4;
296 }
297 buf[nibbles] = '\0';
298 call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
299}
300
301
302static void __init prom_printf(const char *format, ...)
303{
304 unsigned long offset = reloc_offset();
305 const char *p, *q, *s;
306 va_list args;
307 unsigned long v;
308 struct prom_t *_prom = PTRRELOC(&prom);
309
310 va_start(args, format);
311 for (p = PTRRELOC(format); *p != 0; p = q) {
312 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
313 ;
314 if (q > p)
315 call_prom("write", 3, 1, _prom->stdout, p, q - p);
316 if (*q == 0)
317 break;
318 if (*q == '\n') {
319 ++q;
320 call_prom("write", 3, 1, _prom->stdout,
321 ADDR("\r\n"), 2);
322 continue;
323 }
324 ++q;
325 if (*q == 0)
326 break;
327 switch (*q) {
328 case 's':
329 ++q;
330 s = va_arg(args, const char *);
331 prom_print(s);
332 break;
333 case 'x':
334 ++q;
335 v = va_arg(args, unsigned long);
336 prom_print_hex(v);
337 break;
338 }
339 }
340}
341
342
343static void __init __attribute__((noreturn)) prom_panic(const char *reason)
344{
345 unsigned long offset = reloc_offset();
346
347 prom_print(PTRRELOC(reason));
348 /* ToDo: should put up an SRC here */
349 call_prom("exit", 0, 0);
350
351 for (;;) /* should never get here */
352 ;
353}
354
355
356static int __init prom_next_node(phandle *nodep)
357{
358 phandle node;
359
360 if ((node = *nodep) != 0
361 && (*nodep = call_prom("child", 1, 1, node)) != 0)
362 return 1;
363 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
364 return 1;
365 for (;;) {
366 if ((node = call_prom("parent", 1, 1, node)) == 0)
367 return 0;
368 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
369 return 1;
370 }
371}
372
373static int __init prom_getprop(phandle node, const char *pname,
374 void *value, size_t valuelen)
375{
376 unsigned long offset = reloc_offset();
377
378 return call_prom("getprop", 4, 1, node, ADDR(pname),
379 (u32)(unsigned long) value, (u32) valuelen);
380}
381
382static int __init prom_getproplen(phandle node, const char *pname)
383{
384 unsigned long offset = reloc_offset();
385
386 return call_prom("getproplen", 2, 1, node, ADDR(pname));
387}
388
389static int __init prom_setprop(phandle node, const char *pname,
390 void *value, size_t valuelen)
391{
392 unsigned long offset = reloc_offset();
393
394 return call_prom("setprop", 4, 1, node, ADDR(pname),
395 (u32)(unsigned long) value, (u32) valuelen);
396}
397
398/* We can't use the standard versions because of RELOC headaches. */
399#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
400 || ('a' <= (c) && (c) <= 'f') \
401 || ('A' <= (c) && (c) <= 'F'))
402
403#define isdigit(c) ('0' <= (c) && (c) <= '9')
404#define islower(c) ('a' <= (c) && (c) <= 'z')
405#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
406
407unsigned long prom_strtoul(const char *cp, const char **endp)
408{
409 unsigned long result = 0, base = 10, value;
410
411 if (*cp == '0') {
412 base = 8;
413 cp++;
414 if (toupper(*cp) == 'X') {
415 cp++;
416 base = 16;
417 }
418 }
419
420 while (isxdigit(*cp) &&
421 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
422 result = result * base + value;
423 cp++;
424 }
425
426 if (endp)
427 *endp = cp;
428
429 return result;
430}
431
432unsigned long prom_memparse(const char *ptr, const char **retptr)
433{
434 unsigned long ret = prom_strtoul(ptr, retptr);
435 int shift = 0;
436
437 /*
438 * We can't use a switch here because GCC *may* generate a
439 * jump table which won't work, because we're not running at
440 * the address we're linked at.
441 */
442 if ('G' == **retptr || 'g' == **retptr)
443 shift = 30;
444
445 if ('M' == **retptr || 'm' == **retptr)
446 shift = 20;
447
448 if ('K' == **retptr || 'k' == **retptr)
449 shift = 10;
450
451 if (shift) {
452 ret <<= shift;
453 (*retptr)++;
454 }
455
456 return ret;
457}
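/*
 * [Editorial note, not part of the original file: a worked example of
 * prom_memparse(). For the string "512M", prom_strtoul() returns 512
 * and leaves *retptr at the 'M'; the shift of 20 then gives
 * 512 << 20 = 0x20000000 (512MB), and *retptr is advanced past the
 * suffix.]
 */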
458
459/*
460 * Early parsing of the command line passed to the kernel, used for
461 * "mem=x" and the options that affect the iommu
462 */
463static void __init early_cmdline_parse(void)
464{
465 unsigned long offset = reloc_offset();
466 struct prom_t *_prom = PTRRELOC(&prom);
467 char *opt, *p;
468 int l = 0;
469
470 RELOC(prom_cmd_line[0]) = 0;
471 p = RELOC(prom_cmd_line);
472 if ((long)_prom->chosen > 0)
473 l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
474#ifdef CONFIG_CMDLINE
475 if (l == 0) /* dbl check */
476 strlcpy(RELOC(prom_cmd_line),
477 RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
478#endif /* CONFIG_CMDLINE */
479 prom_printf("command line: %s\n", RELOC(prom_cmd_line));
480
481 opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
482 if (opt) {
483 prom_printf("iommu opt is: %s\n", opt);
484 opt += 6;
485 while (*opt && *opt == ' ')
486 opt++;
487 if (!strncmp(opt, RELOC("off"), 3))
488 RELOC(ppc64_iommu_off) = 1;
489 else if (!strncmp(opt, RELOC("force"), 5))
490 RELOC(iommu_force_on) = 1;
491 }
492
493 opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
494 if (opt) {
495 opt += 4;
496 RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
497 /* Align to 16 MB == size of large page */
498 RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
499 }
500}
501
502/*
503 * To tell the firmware what our capabilities are, we have to pass
504 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
505 * that contain structures that contain the actual values.
506 */
507static struct fake_elf {
508 Elf32_Ehdr elfhdr;
509 Elf32_Phdr phdr[2];
510 struct chrpnote {
511 u32 namesz;
512 u32 descsz;
513 u32 type;
514 char name[8]; /* "PowerPC" */
515 struct chrpdesc {
516 u32 real_mode;
517 u32 real_base;
518 u32 real_size;
519 u32 virt_base;
520 u32 virt_size;
521 u32 load_base;
522 } chrpdesc;
523 } chrpnote;
524 struct rpanote {
525 u32 namesz;
526 u32 descsz;
527 u32 type;
528 char name[24]; /* "IBM,RPA-Client-Config" */
529 struct rpadesc {
530 u32 lpar_affinity;
531 u32 min_rmo_size;
532 u32 min_rmo_percent;
533 u32 max_pft_size;
534 u32 splpar;
535 u32 min_load;
536 u32 new_mem_def;
537 u32 ignore_me;
538 } rpadesc;
539 } rpanote;
540} fake_elf = {
541 .elfhdr = {
542 .e_ident = { 0x7f, 'E', 'L', 'F',
543 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
544 .e_type = ET_EXEC, /* yeah right */
545 .e_machine = EM_PPC,
546 .e_version = EV_CURRENT,
547 .e_phoff = offsetof(struct fake_elf, phdr),
548 .e_phentsize = sizeof(Elf32_Phdr),
549 .e_phnum = 2
550 },
551 .phdr = {
552 [0] = {
553 .p_type = PT_NOTE,
554 .p_offset = offsetof(struct fake_elf, chrpnote),
555 .p_filesz = sizeof(struct chrpnote)
556 }, [1] = {
557 .p_type = PT_NOTE,
558 .p_offset = offsetof(struct fake_elf, rpanote),
559 .p_filesz = sizeof(struct rpanote)
560 }
561 },
562 .chrpnote = {
563 .namesz = sizeof("PowerPC"),
564 .descsz = sizeof(struct chrpdesc),
565 .type = 0x1275,
566 .name = "PowerPC",
567 .chrpdesc = {
568 .real_mode = ~0U, /* ~0 means "don't care" */
569 .real_base = ~0U,
570 .real_size = ~0U,
571 .virt_base = ~0U,
572 .virt_size = ~0U,
573 .load_base = ~0U
574 },
575 },
576 .rpanote = {
577 .namesz = sizeof("IBM,RPA-Client-Config"),
578 .descsz = sizeof(struct rpadesc),
579 .type = 0x12759999,
580 .name = "IBM,RPA-Client-Config",
581 .rpadesc = {
582 .lpar_affinity = 0,
583 .min_rmo_size = 64, /* in megabytes */
584 .min_rmo_percent = 0,
585 .max_pft_size = 48, /* 2^48 bytes max PFT size */
586 .splpar = 1,
587 .min_load = ~0U,
588 .new_mem_def = 0
589 }
590 }
591};
592
593static void __init prom_send_capabilities(void)
594{
595 unsigned long offset = reloc_offset();
596 ihandle elfloader;
597
598 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
599 if (elfloader == 0) {
600 prom_printf("couldn't open /packages/elf-loader\n");
601 return;
602 }
603 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
604 elfloader, ADDR(&fake_elf));
605 call_prom("close", 1, 0, elfloader);
606}
607
608/*
609 * Memory allocation strategy... our layout is normally:
610 *
611 * at 14MB or more we have the vmlinux, then a gap and the initrd. In some
612 * rare cases the initrd might end up being before the kernel, though. We
613 * assume this won't overwrite the final kernel at 0; we have no provision
614 * to handle that in this version, but it should hopefully never happen.
615 *
616 * alloc_top is set to the top of RMO, and is shrunk down if the TCEs overlap
617 * alloc_bottom is set to the top of kernel/initrd
618 *
619 * From there, allocations are done this way: RTAS is allocated topmost, and
620 * the device-tree is allocated from the bottom. We try to grow the device-tree
621 * allocation as we progress. If we can't, then we fail; we don't currently
622 * have a facility to restart elsewhere, but that shouldn't be necessary either.
623 *
624 * Note that calls to reserve_mem have to be done explicitly; memory allocated
625 * with either alloc_up or alloc_down isn't automatically reserved.
626 */
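/*
 * [Editorial note, not part of the original file: the layout described
 * above, schematically (addresses grow upward):
 *
 *	ram_top         ---------------------
 *	                | alloc_down(highmem)|  e.g. TCE tables
 *	alloc_top_high  ---------------------
 *	                |        ...         |
 *	rmo_top         ---------------------
 *	                |    alloc_down()    |  RTAS (topmost)
 *	alloc_top       ---------------------
 *	                |      free gap      |
 *	                ---------------------
 *	                |     alloc_up()     |  device-tree (grows up)
 *	alloc_bottom    ---------------------
 *	                |   kernel / initrd  |
 *	0               --------------------- ]
 */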
627
628
629/*
630 * Allocates memory in the RMO upward from the kernel/initrd
631 *
632 * When align is 0 this is a special case: it means to allocate in place
633 * at the current location of alloc_bottom or fail (that is basically
634 * extending the previous allocation). Used for the device-tree flattening
635 */
636static unsigned long __init alloc_up(unsigned long size, unsigned long align)
637{
638 unsigned long offset = reloc_offset();
639 unsigned long base = _ALIGN_UP(RELOC(alloc_bottom), align);
640 unsigned long addr = 0;
641
642 prom_debug("alloc_up(%x, %x)\n", size, align);
643 if (RELOC(ram_top) == 0)
644 prom_panic("alloc_up() called with mem not initialized\n");
645
646 if (align)
647 base = _ALIGN_UP(RELOC(alloc_bottom), align);
648 else
649 base = RELOC(alloc_bottom);
650
651 for(; (base + size) <= RELOC(alloc_top);
652 base = _ALIGN_UP(base + 0x100000, align)) {
653 prom_debug(" trying: 0x%x\n\r", base);
654 addr = (unsigned long)prom_claim(base, size, 0);
655 if (addr != PROM_ERROR)
656 break;
657 addr = 0;
658 if (align == 0)
659 break;
660 }
661 if (addr == 0)
662 return 0;
663 RELOC(alloc_bottom) = addr;
664
665 prom_debug(" -> %x\n", addr);
666 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
667 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
668 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
669 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
670 prom_debug(" ram_top : %x\n", RELOC(ram_top));
671
672 return addr;
673}
674
675/*
676 * Allocates memory downward, either from the top of RMO or, if highmem
677 * is set, from the top of RAM. Note that this one doesn't handle
678 * failures. It does claim memory if highmem is not set.
679 */
680static unsigned long __init alloc_down(unsigned long size, unsigned long align,
681 int highmem)
682{
683 unsigned long offset = reloc_offset();
684 unsigned long base, addr = 0;
685
686 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
687 highmem ? RELOC("(high)") : RELOC("(low)"));
688 if (RELOC(ram_top) == 0)
689 prom_panic("alloc_down() called with mem not initialized\n");
690
691 if (highmem) {
692 /* Carve out storage for the TCE table. */
693 addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
694 if (addr <= RELOC(alloc_bottom))
695 return 0;
696 else {
697 /* Will we bump into the RMO? If yes, check that we
698 * don't overlap existing allocations there; if we do,
699 * we are dead, we must be the first in town !
700 */
701 if (addr < RELOC(rmo_top)) {
702 /* Good, we are first */
703 if (RELOC(alloc_top) == RELOC(rmo_top))
704 RELOC(alloc_top) = RELOC(rmo_top) = addr;
705 else
706 return 0;
707 }
708 RELOC(alloc_top_high) = addr;
709 }
710 goto bail;
711 }
712
713 base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
714 for(; base > RELOC(alloc_bottom); base = _ALIGN_DOWN(base - 0x100000, align)) {
715 prom_debug(" trying: 0x%x\n\r", base);
716 addr = (unsigned long)prom_claim(base, size, 0);
717 if (addr != PROM_ERROR)
718 break;
719 addr = 0;
720 }
721 if (addr == 0)
722 return 0;
723 RELOC(alloc_top) = addr;
724
725 bail:
726 prom_debug(" -> %x\n", addr);
727 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
728 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
729 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
730 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
731 prom_debug(" ram_top : %x\n", RELOC(ram_top));
732
733 return addr;
734}
735
736/*
737 * Parse a "reg" cell
738 */
739static unsigned long __init prom_next_cell(int s, cell_t **cellp)
740{
741 cell_t *p = *cellp;
742 unsigned long r = 0;
743
744 /* Ignore more than 2 cells */
745 while (s > 2) {
746 p++;
747 s--;
748 }
749 while (s) {
750 r <<= 32;
751 r |= *(p++);
752 s--;
753 }
754
755 *cellp = p;
756 return r;
757}
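/*
 * Worked example (illustrative): with root_addr_cells == 2, a "reg"
 * property starting { 0x00000001, 0x80000000 } decodes via
 *
 *	base = prom_next_cell(2, &p);
 *
 * to (0x1ul << 32) | 0x80000000 == 0x180000000, with p left pointing at
 * the size cells that follow.
 */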
758
759/*
760 * Very dumb function for adding to the memory reserve list, but
761 * we don't need anything smarter at this point
762 *
763 * XXX Eventually check for collisions. They should NEVER happen;
764 * if problems show up, this would be a good place to start
765 * tracking them down.
766 */
767static void reserve_mem(unsigned long base, unsigned long size)
768{
769 unsigned long offset = reloc_offset();
770 unsigned long top = base + size;
771 unsigned long cnt = RELOC(mem_reserve_cnt);
772
773 if (size == 0)
774 return;
775
776 /* We need to always keep one empty entry so that we
777 * have our terminator with "size" set to 0 since we are
778 * dumb and just copy this entire array to the boot params
779 */
780 base = _ALIGN_DOWN(base, PAGE_SIZE);
781 top = _ALIGN_UP(top, PAGE_SIZE);
782 size = top - base;
783
784 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
785 prom_panic("Memory reserve map exhausted !\n");
786 RELOC(mem_reserve_map)[cnt].base = base;
787 RELOC(mem_reserve_map)[cnt].size = size;
788 RELOC(mem_reserve_cnt) = cnt + 1;
789}
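/*
 * Illustrative: with 4KB pages, reserve_mem(0x12345, 0x100) records the
 * page-rounded range base = 0x12000, size = 0x1000, since the map only
 * deals in whole pages.
 */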
790
791/*
792 * Initialize the memory allocation mechanism: parse "memory" nodes to
793 * obtain the top of memory and of the RMO, and set up our local allocator
794 */
795static void __init prom_init_mem(void)
796{
797 phandle node;
798 char *path, type[64];
799 unsigned int plen;
800 cell_t *p, *endp;
801 unsigned long offset = reloc_offset();
802 struct prom_t *_prom = PTRRELOC(&prom);
803
804 /*
805 * We iterate the memory nodes to find
806 * 1) top of RMO (first node)
807 * 2) top of memory
808 */
809 prom_debug("root_addr_cells: %x\n", (long)_prom->root_addr_cells);
810 prom_debug("root_size_cells: %x\n", (long)_prom->root_size_cells);
811
812 prom_debug("scanning memory:\n");
813 path = RELOC(prom_scratch);
814
815 for (node = 0; prom_next_node(&node); ) {
816 type[0] = 0;
817 prom_getprop(node, "device_type", type, sizeof(type));
818
819 if (strcmp(type, RELOC("memory")))
820 continue;
821
822 plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
823 if (plen > sizeof(regbuf)) {
824 prom_printf("memory node too large for buffer !\n");
825 plen = sizeof(regbuf);
826 }
827 p = RELOC(regbuf);
828 endp = p + (plen / sizeof(cell_t));
829
830#ifdef DEBUG_PROM
831 memset(path, 0, PROM_SCRATCH_SIZE);
832 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
833 prom_debug(" node %s :\n", path);
834#endif /* DEBUG_PROM */
835
836 while ((endp - p) >= (_prom->root_addr_cells + _prom->root_size_cells)) {
837 unsigned long base, size;
838
839 base = prom_next_cell(_prom->root_addr_cells, &p);
840 size = prom_next_cell(_prom->root_size_cells, &p);
841
842 if (size == 0)
843 continue;
844 prom_debug(" %x %x\n", base, size);
845 if (base == 0)
846 RELOC(rmo_top) = size;
847 if ((base + size) > RELOC(ram_top))
848 RELOC(ram_top) = base + size;
849 }
850 }
851
852 RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(klimit) - offset + 0x4000);
853
854 /* Check if we have an initrd after the kernel; if we do, move our bottom
855 * point to after it
856 */
857 if (RELOC(prom_initrd_start)) {
858 if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
859 RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
860 }
861
862 /*
863 * If prom_memory_limit is set we reduce the upper limits *except* for
864 * alloc_top_high. This must be the real top of RAM so we can put
865 * TCEs up there.
866 */
867
868 RELOC(alloc_top_high) = RELOC(ram_top);
869
870 if (RELOC(prom_memory_limit)) {
871 if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
872 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
873 RELOC(prom_memory_limit));
874 RELOC(prom_memory_limit) = 0;
875 } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
876 prom_printf("Ignoring mem=%x >= ram_top.\n",
877 RELOC(prom_memory_limit));
878 RELOC(prom_memory_limit) = 0;
879 } else {
880 RELOC(ram_top) = RELOC(prom_memory_limit);
881 RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
882 }
883 }
884
885 /*
886 * Setup our top alloc point, that is top of RMO or top of
887 * segment 0 when running non-LPAR.
888 */
889 if ( RELOC(of_platform) == PLATFORM_PSERIES_LPAR )
890 RELOC(alloc_top) = RELOC(rmo_top);
891 else
892 /* Some RS64 machines have buggy firmware where claims up at 1GB
893 * fail. Cap at 768MB as a workaround. Still plenty of room.
894 */
895 RELOC(alloc_top) = RELOC(rmo_top) = min(0x30000000ul, RELOC(ram_top));
896
897 prom_printf("memory layout at init:\n");
898 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
899 prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
900 prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
901 prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
902 prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
903 prom_printf(" ram_top : %x\n", RELOC(ram_top));
904}
905
906
907/*
908 * Allocate room for and instantiate RTAS
909 */
910static void __init prom_instantiate_rtas(void)
911{
912 unsigned long offset = reloc_offset();
913 struct prom_t *_prom = PTRRELOC(&prom);
914 phandle rtas_node;
915 ihandle rtas_inst;
916 u32 base, entry = 0;
917 u32 size = 0;
918
919 prom_debug("prom_instantiate_rtas: start...\n");
920
921 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
922 prom_debug("rtas_node: %x\n", rtas_node);
923 if (!PHANDLE_VALID(rtas_node))
924 return;
925
926 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
927 if (size == 0)
928 return;
929
930 base = alloc_down(size, PAGE_SIZE, 0);
931 if (base == 0) {
932 prom_printf("RTAS allocation failed !\n");
933 return;
934 }
935
936 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
937 if (!IHANDLE_VALID(rtas_inst)) {
938 prom_printf("opening rtas package failed");
939 return;
940 }
941
942 prom_printf("instantiating rtas at 0x%x ...", base);
943
944 if (call_prom("call-method", 3, 2,
945 ADDR("instantiate-rtas"),
946 rtas_inst, base) != PROM_ERROR) {
947 entry = (long)_prom->args.rets[1];
948 }
949 if (entry == 0) {
950 prom_printf(" failed\n");
951 return;
952 }
953 prom_printf(" done\n");
954
955 reserve_mem(base, size);
956
957 prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
958 prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
959
960 prom_debug("rtas base = 0x%x\n", base);
961 prom_debug("rtas entry = 0x%x\n", entry);
962 prom_debug("rtas size = 0x%x\n", (long)size);
963
964 prom_debug("prom_instantiate_rtas: end...\n");
965}
966
967
968/*
969 * Allocate room for and initialize TCE tables
970 */
971static void __init prom_initialize_tce_table(void)
972{
973 phandle node;
974 ihandle phb_node;
975 unsigned long offset = reloc_offset();
976 char compatible[64], type[64], model[64];
977 char *path = RELOC(prom_scratch);
978 u64 base, align;
979 u32 minalign, minsize;
980 u64 tce_entry, *tce_entryp;
981 u64 local_alloc_top, local_alloc_bottom;
982 u64 i;
983
984 if (RELOC(ppc64_iommu_off))
985 return;
986
987 prom_debug("starting prom_initialize_tce_table\n");
988
989 /* Cache current top of allocs so we reserve a single block */
990 local_alloc_top = RELOC(alloc_top_high);
991 local_alloc_bottom = local_alloc_top;
992
993 /* Search all nodes looking for PHBs. */
994 for (node = 0; prom_next_node(&node); ) {
995 compatible[0] = 0;
996 type[0] = 0;
997 model[0] = 0;
998 prom_getprop(node, "compatible",
999 compatible, sizeof(compatible));
1000 prom_getprop(node, "device_type", type, sizeof(type));
1001 prom_getprop(node, "model", model, sizeof(model));
1002
1003 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1004 continue;
1005
1006 /* Keep the old logic intact to avoid regressions. */
1007 if (compatible[0] != 0) {
1008 if ((strstr(compatible, RELOC("python")) == NULL) &&
1009 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
1010 (strstr(compatible, RELOC("Winnipeg")) == NULL))
1011 continue;
1012 } else if (model[0] != 0) {
1013 if ((strstr(model, RELOC("ython")) == NULL) &&
1014 (strstr(model, RELOC("peedwagon")) == NULL) &&
1015 (strstr(model, RELOC("innipeg")) == NULL))
1016 continue;
1017 }
1018
1019 if (prom_getprop(node, "tce-table-minalign", &minalign,
1020 sizeof(minalign)) == PROM_ERROR)
1021 minalign = 0;
1022 if (prom_getprop(node, "tce-table-minsize", &minsize,
1023 sizeof(minsize)) == PROM_ERROR)
1024 minsize = 4UL << 20;
1025
1026 /*
1027 * Even though we read what OF wants, we just set the table
1028 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1029 * By doing this, we avoid the pitfalls of trying to DMA to
1030 * MMIO space and the DMA alias hole.
1031 *
1032 * On POWER4, firmware sets the TCE region by assuming
1033 * each TCE table is 8MB. Using this memory for anything
1034 * else will impact performance, so we always allocate 8MB.
1035 * Anton
1036 */
1037 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
1038 minsize = 8UL << 20;
1039 else
1040 minsize = 4UL << 20;
1041
1042 /* Align to the greater of the align or size */
1043 align = max(minalign, minsize);
1044 base = alloc_down(minsize, align, 1);
1045 if (base == 0)
1046 prom_panic("ERROR, cannot find space for TCE table.\n");
1047 if (base < local_alloc_bottom)
1048 local_alloc_bottom = base;
1049
1050 /* Save away the TCE table attributes for later use. */
1051 prom_setprop(node, "linux,tce-base", &base, sizeof(base));
1052 prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
1053
1054 /* It seems OF doesn't null-terminate the path :-( */
1055 memset(path, 0, PROM_SCRATCH_SIZE); /* not sizeof(path): path is a pointer */
1056 /* Call OF to setup the TCE hardware */
1057 if (call_prom("package-to-path", 3, 1, node,
1058 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1059 prom_printf("package-to-path failed\n");
1060 }
1061
1062 prom_debug("TCE table: %s\n", path);
1063 prom_debug("\tnode = 0x%x\n", node);
1064 prom_debug("\tbase = 0x%x\n", base);
1065 prom_debug("\tsize = 0x%x\n", minsize);
1066
1067 /* Initialize the table to have a one-to-one mapping
1068 * over the allocated size.
1069 */
1070 tce_entryp = (unsigned long *)base;
1071 for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
1072 tce_entry = (i << PAGE_SHIFT);
1073 tce_entry |= 0x3;
1074 *tce_entryp = tce_entry;
1075 }
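/* Each 64-bit entry above is the target real address with the low bits
 * used as permission flags; 0x3 is taken here to mean read+write enable
 * (an assumption about the TCE format), so the loop builds a one-to-one
 * DMA window over the first (minsize >> 3) pages.
 */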
1076
1077 prom_printf("opening PHB %s", path);
1078 phb_node = call_prom("open", 1, 1, path);
1079 if (phb_node == 0)
1080 prom_printf("... failed\n");
1081 else
1082 prom_printf("... done\n");
1083
1084 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1085 phb_node, -1, minsize,
1086 (u32) base, (u32) (base >> 32));
1087 call_prom("close", 1, 0, phb_node);
1088 }
1089
1090 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1091
1092 if (RELOC(prom_memory_limit)) {
1093 /*
1094 * We align the start to a 16MB boundary so we can map the TCE area
1095 * using large pages if possible. The end should be the top of RAM
1096 * so no need to align it.
1097 */
1098 RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom, 0x1000000);
1099 RELOC(prom_tce_alloc_end) = local_alloc_top;
1100 }
1101
1102 /* Flag the first invalid entry */
1103 prom_debug("ending prom_initialize_tce_table\n");
1104}
1105
1106/*
1107 * With CHRP SMP we need to use the OF to start the other
1108 * processors so we can't wait until smp_boot_cpus (the OF is
1109 * trashed by then) so we have to put the processors into
1110 * a holding pattern controlled by the kernel (not OF) before
1111 * we destroy the OF.
1112 *
1113 * This uses a chunk of low memory, puts some holding pattern
1114 * code there and sends the other processors off to there until
1115 * smp_boot_cpus tells them to do something. The holding pattern
1116 * checks that address until its cpu # appears there; when it does,
1117 * that cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1118 * of setting those values.
1119 *
1120 * We also use physical address 0x4 here to tell when a cpu
1121 * is in its holding pattern code.
1122 *
1123 * Fixup comment... DRENG / PPPBBB - Peter
1124 *
1125 * -- Cort
1126 */
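/*
 * A minimal sketch of the secondary side of this protocol (the real code
 * is the __secondary_hold asm stub in head.S; this is illustrative C):
 *
 *	__secondary_hold_acknowledge = my_hw_cpu_id;	announce arrival
 *	while (__secondary_hold_spinloop == 0)
 *		barrier();				park, OF-free
 *	branch_to(__secondary_hold_spinloop);		released by the kernel
 */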
1127static void __init prom_hold_cpus(void)
1128{
1129 unsigned long i;
1130 unsigned int reg;
1131 phandle node;
1132 unsigned long offset = reloc_offset();
1133 char type[64];
1134 int cpuid = 0;
1135 unsigned int interrupt_server[MAX_CPU_THREADS];
1136 unsigned int cpu_threads, hw_cpu_num;
1137 int propsize;
1138 extern void __secondary_hold(void);
1139 extern unsigned long __secondary_hold_spinloop;
1140 extern unsigned long __secondary_hold_acknowledge;
1141 unsigned long *spinloop
1142 = (void *)virt_to_abs(&__secondary_hold_spinloop);
1143 unsigned long *acknowledge
1144 = (void *)virt_to_abs(&__secondary_hold_acknowledge);
1145 unsigned long secondary_hold
1146 = virt_to_abs(*PTRRELOC((unsigned long *)__secondary_hold));
1147 struct prom_t *_prom = PTRRELOC(&prom);
1148
1149 prom_debug("prom_hold_cpus: start...\n");
1150 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1151 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1152 prom_debug(" 1) acknowledge = 0x%x\n",
1153 (unsigned long)acknowledge);
1154 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1155 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1156
1157 /* Set the common spinloop variable, so all of the secondary cpus
1158 * will block when they are awakened from their OF spinloop.
1159 * This must occur for both SMP and non SMP kernels, since OF will
1160 * be trashed when we move the kernel.
1161 */
1162 *spinloop = 0;
1163
1164#ifdef CONFIG_HMT
1165 for (i=0; i < NR_CPUS; i++) {
1166 RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
1167 }
1168#endif
1169 /* look for cpus */
1170 for (node = 0; prom_next_node(&node); ) {
1171 type[0] = 0;
1172 prom_getprop(node, "device_type", type, sizeof(type));
1173 if (strcmp(type, RELOC("cpu")) != 0)
1174 continue;
1175
1176 /* Skip non-configured cpus. */
1177 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1178 if (strcmp(type, RELOC("okay")) != 0)
1179 continue;
1180
1181 reg = -1;
1182 prom_getprop(node, "reg", &reg, sizeof(reg));
1183
1184 prom_debug("\ncpuid = 0x%x\n", cpuid);
1185 prom_debug("cpu hw idx = 0x%x\n", reg);
1186
1187 /* Init the acknowledge var which will be reset by
1188 * the secondary cpu when it awakens from its OF
1189 * spinloop.
1190 */
1191 *acknowledge = (unsigned long)-1;
1192
1193 propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
1194 &interrupt_server,
1195 sizeof(interrupt_server));
1196 if (propsize < 0) {
1197 /* no property. old hardware has no SMT */
1198 cpu_threads = 1;
1199 interrupt_server[0] = reg; /* fake it with phys id */
1200 } else {
1201 /* We have a threaded processor */
1202 cpu_threads = propsize / sizeof(u32);
1203 if (cpu_threads > MAX_CPU_THREADS) {
1204 prom_printf("SMT: too many threads!\n"
1205 "SMT: found %x, max is %x\n",
1206 cpu_threads, MAX_CPU_THREADS);
1207 cpu_threads = 1; /* ToDo: panic? */
1208 }
1209 }
1210
1211 hw_cpu_num = interrupt_server[0];
1212 if (hw_cpu_num != _prom->cpu) {
1213 /* Primary Thread of non-boot cpu */
1214 prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
1215 call_prom("start-cpu", 3, 0, node,
1216 secondary_hold, reg);
1217
1218 for ( i = 0 ; (i < 100000000) &&
1219 (*acknowledge == ((unsigned long)-1)); i++ )
1220 mb();
1221
1222 if (*acknowledge == reg) {
1223 prom_printf("done\n");
1224 /* We have to get every CPU out of OF,
1225 * even if we never start it. */
1226 if (cpuid >= NR_CPUS)
1227 goto next;
1228 } else {
1229 prom_printf("failed: %x\n", *acknowledge);
1230 }
1231 }
1232#ifdef CONFIG_SMP
1233 else
1234 prom_printf("%x : boot cpu %x\n", cpuid, reg);
1235#endif
1236next:
1237#ifdef CONFIG_SMP
1238 /* Reserve cpu ids for secondary threads; they are started later. */
1239 for (i=1; i < cpu_threads; i++) {
1240 cpuid++;
1241 if (cpuid >= NR_CPUS)
1242 continue;
1243 }
1244#endif /* CONFIG_SMP */
1245 cpuid++;
1246 }
1247#ifdef CONFIG_HMT
1248 /* Only enable HMT on processors that provide support. */
1249 if (__is_processor(PV_PULSAR) ||
1250 __is_processor(PV_ICESTAR) ||
1251 __is_processor(PV_SSTAR)) {
1252 prom_printf(" starting secondary threads\n");
1253
1254 for (i = 0; i < NR_CPUS; i += 2) {
1255 if (!cpu_online(i))
1256 continue;
1257
1258 if (i == 0) {
1259 unsigned long pir = mfspr(SPRN_PIR);
1260 if (__is_processor(PV_PULSAR)) {
1261 RELOC(hmt_thread_data)[i].pir =
1262 pir & 0x1f;
1263 } else {
1264 RELOC(hmt_thread_data)[i].pir =
1265 pir & 0x3ff;
1266 }
1267 }
1268 }
1269 } else {
1270 prom_printf("Processor is not HMT capable\n");
1271 }
1272#endif
1273
1274 if (cpuid > NR_CPUS)
1275 prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
1276 ") exceeded: ignoring extras\n");
1277
1278 prom_debug("prom_hold_cpus: end...\n");
1279}
1280
1281
1282static void __init prom_init_client_services(unsigned long pp)
1283{
1284 unsigned long offset = reloc_offset();
1285 struct prom_t *_prom = PTRRELOC(&prom);
1286
1287 /* Get a handle to the prom entry point before anything else */
1288 _prom->entry = pp;
1289
1290 /* Init default value for phys size */
1291 _prom->root_size_cells = 1;
1292 _prom->root_addr_cells = 2;
1293
1294 /* get a handle for the stdout device */
1295 _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1296 if (!PHANDLE_VALID(_prom->chosen))
1297 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1298
1299 /* get device tree root */
1300 _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
1301 if (!PHANDLE_VALID(_prom->root))
1302 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1303}
1304
1305static void __init prom_init_stdout(void)
1306{
1307 unsigned long offset = reloc_offset();
1308 struct prom_t *_prom = PTRRELOC(&prom);
1309 char *path = RELOC(of_stdout_device);
1310 char type[16];
1311 u32 val;
1312
1313 if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
1314 prom_panic("cannot find stdout");
1315
1316 _prom->stdout = val;
1317
1318 /* Get the full OF pathname of the stdout device */
1319 memset(path, 0, 256);
1320 call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
1321 val = call_prom("instance-to-package", 1, 1, _prom->stdout);
1322 prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
1323 prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
1324 prom_setprop(_prom->chosen, "linux,stdout-path",
1325 RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
1326
1327 /* If it's a display, note it */
1328 memset(type, 0, sizeof(type));
1329 prom_getprop(val, "device_type", type, sizeof(type));
1330 if (strcmp(type, RELOC("display")) == 0) {
1331 _prom->disp_node = val;
1332 prom_setprop(val, "linux,boot-display", NULL, 0);
1333 }
1334}
1335
1336static void __init prom_close_stdin(void)
1337{
1338 unsigned long offset = reloc_offset();
1339 struct prom_t *_prom = PTRRELOC(&prom);
1340 ihandle val;
1341
1342 if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
1343 call_prom("close", 1, 0, val);
1344}
1345
1346static int __init prom_find_machine_type(void)
1347{
1348 unsigned long offset = reloc_offset();
1349 struct prom_t *_prom = PTRRELOC(&prom);
1350 char compat[256];
1351 int len, i = 0;
1352 phandle rtas;
1353
1354 len = prom_getprop(_prom->root, "compatible",
1355 compat, sizeof(compat)-1);
1356 if (len > 0) {
1357 compat[len] = 0;
1358 while (i < len) {
1359 char *p = &compat[i];
1360 int sl = strlen(p);
1361 if (sl == 0)
1362 break;
1363 if (strstr(p, RELOC("Power Macintosh")) ||
1364 strstr(p, RELOC("MacRISC4")))
1365 return PLATFORM_POWERMAC;
1366 if (strstr(p, RELOC("Momentum,Maple")))
1367 return PLATFORM_MAPLE;
1368 i += sl + 1;
1369 }
1370 }
1371 /* Default to pSeries. We need to know if we are running LPAR */
1372 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1373 if (PHANDLE_VALID(rtas)) {
1374 int x = prom_getproplen(rtas, "ibm,hypertas-functions");
1375 if (x != PROM_ERROR) {
1376 prom_printf("Hypertas detected, assuming LPAR !\n");
1377 return PLATFORM_PSERIES_LPAR;
1378 }
1379 }
1380 return PLATFORM_PSERIES;
1381}
1382
1383static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1384{
1385 unsigned long offset = reloc_offset();
1386
1387 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1388}
1389
1390/*
1391 * If we have a display that we don't know how to drive,
1392 * we will want to try to execute OF's open method for it
1393 * later. However, OF will probably fall over if we do that
1394 * after we've taken over the MMU.
1395 * So we check whether we will need to open the display,
1396 * and if so, open it now.
1397 */
1398static void __init prom_check_displays(void)
1399{
1400 unsigned long offset = reloc_offset();
1401 struct prom_t *_prom = PTRRELOC(&prom);
1402 char type[16], *path;
1403 phandle node;
1404 ihandle ih;
1405 int i;
1406
1407 static unsigned char default_colors[] = {
1408 0x00, 0x00, 0x00,
1409 0x00, 0x00, 0xaa,
1410 0x00, 0xaa, 0x00,
1411 0x00, 0xaa, 0xaa,
1412 0xaa, 0x00, 0x00,
1413 0xaa, 0x00, 0xaa,
1414 0xaa, 0xaa, 0x00,
1415 0xaa, 0xaa, 0xaa,
1416 0x55, 0x55, 0x55,
1417 0x55, 0x55, 0xff,
1418 0x55, 0xff, 0x55,
1419 0x55, 0xff, 0xff,
1420 0xff, 0x55, 0x55,
1421 0xff, 0x55, 0xff,
1422 0xff, 0xff, 0x55,
1423 0xff, 0xff, 0xff
1424 };
1425 const unsigned char *clut;
1426
1427 prom_printf("Looking for displays\n");
1428 for (node = 0; prom_next_node(&node); ) {
1429 memset(type, 0, sizeof(type));
1430 prom_getprop(node, "device_type", type, sizeof(type));
1431 if (strcmp(type, RELOC("display")) != 0)
1432 continue;
1433
1434 /* It seems OF doesn't null-terminate the path :-( */
1435 path = RELOC(prom_scratch);
1436 memset(path, 0, PROM_SCRATCH_SIZE);
1437
1438 /*
1439 * leave some room at the end of the path for appending extra
1440 * arguments
1441 */
1442 if (call_prom("package-to-path", 3, 1, node, path,
1443 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1444 continue;
1445 prom_printf("found display : %s, opening ... ", path);
1446
1447 ih = call_prom("open", 1, 1, path);
1448 if (ih == 0) {
1449 prom_printf("failed\n");
1450 continue;
1451 }
1452
1453 /* Success */
1454 prom_printf("done\n");
1455 prom_setprop(node, "linux,opened", NULL, 0);
1456
1457 /*
1458 * If stdout wasn't a display node, pick the first we can find
1459 * for btext
1460 */
1461 if (_prom->disp_node == 0)
1462 _prom->disp_node = node;
1463
1464 /* Set up a usable color table when the appropriate
1465 * method is available. Should be updated to use set-colors. */
1466 clut = RELOC(default_colors);
1467 for (i = 0; i < 32; i++, clut += 3)
1468 if (prom_set_color(ih, i, clut[0], clut[1],
1469 clut[2]) != 0)
1470 break;
1471
1472#ifdef CONFIG_LOGO_LINUX_CLUT224
1473 clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
1474 for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
1475 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1476 clut[2]) != 0)
1477 break;
1478#endif /* CONFIG_LOGO_LINUX_CLUT224 */
1479 }
1480}
1481
1482
1483/* Return a (relocated) pointer to this much memory: moves the initrd if required. */
1484static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
1485 unsigned long needed, unsigned long align)
1486{
1487 unsigned long offset = reloc_offset();
1488 void *ret;
1489
1490 *mem_start = _ALIGN(*mem_start, align);
1491 while ((*mem_start + needed) > *mem_end) {
1492 unsigned long room, chunk;
1493
1494 prom_debug("Chunk exhausted, claiming more at %x...\n",
1495 RELOC(alloc_bottom));
1496 room = RELOC(alloc_top) - RELOC(alloc_bottom);
1497 if (room > DEVTREE_CHUNK_SIZE)
1498 room = DEVTREE_CHUNK_SIZE;
1499 if (room < PAGE_SIZE)
1500 prom_panic("No memory for flatten_device_tree (no room)");
1501 chunk = alloc_up(room, 0);
1502 if (chunk == 0)
1503 prom_panic("No memory for flatten_device_tree (claim failed)");
1504 *mem_end = RELOC(alloc_top);
1505 }
1506
1507 ret = (void *)*mem_start;
1508 *mem_start += needed;
1509
1510 return ret;
1511}
1512
1513#define dt_push_token(token, mem_start, mem_end) \
1514 do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
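/*
 * For orientation: the structure block emitted below is a token stream.
 * A node with a single property serializes roughly as (sketch)
 *
 *	OF_DT_BEGIN_NODE  "unit-name\0"  (padded to 4 bytes)
 *	OF_DT_PROP  <value length>  <offset of name in string block>  <value>
 *	OF_DT_END_NODE
 *
 * with a final OF_DT_END closing the whole block.
 */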
1515
1516static unsigned long __init dt_find_string(char *str)
1517{
1518 unsigned long offset = reloc_offset();
1519 char *s, *os;
1520
1521 s = os = (char *)RELOC(dt_string_start);
1522 s += 4;
1523 while (s < (char *)RELOC(dt_string_end)) {
1524 if (strcmp(s, str) == 0)
1525 return s - os;
1526 s += strlen(s) + 1;
1527 }
1528 return 0;
1529}
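/*
 * Note that the returned offset is relative to dt_string_start, and that 0
 * doubles as "not found"; this works because flatten_device_tree() leaves
 * a 4-byte hole at the start of the string block, so no real string ever
 * sits at offset 0 (hence the s += 4 above).
 */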
1530
1531/*
1532 * The Open Firmware 1275 specification states properties must be 31 bytes or
1533 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
1534 */
1535#define MAX_PROPERTY_NAME 64
1536
1537static void __init scan_dt_build_strings(phandle node,
1538 unsigned long *mem_start,
1539 unsigned long *mem_end)
1540{
1541 unsigned long offset = reloc_offset();
1542 char *prev_name, *namep, *sstart;
1543 unsigned long soff;
1544 phandle child;
1545
1546 sstart = (char *)RELOC(dt_string_start);
1547
1548 /* get and store all property names */
1549 prev_name = RELOC("");
1550 for (;;) {
1551 /* 64 is max len of name including nul. */
1552 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
1553 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
1554 /* No more properties: unwind alloc */
1555 *mem_start = (unsigned long)namep;
1556 break;
1557 }
1558
1559 /* skip "name" */
1560 if (strcmp(namep, RELOC("name")) == 0) {
1561 *mem_start = (unsigned long)namep;
1562 prev_name = RELOC("name");
1563 continue;
1564 }
1565 /* get/create string entry */
1566 soff = dt_find_string(namep);
1567 if (soff != 0) {
1568 *mem_start = (unsigned long)namep;
1569 namep = sstart + soff;
1570 } else {
1571 /* Trim off some if we can */
1572 *mem_start = (unsigned long)namep + strlen(namep) + 1;
1573 RELOC(dt_string_end) = *mem_start;
1574 }
1575 prev_name = namep;
1576 }
1577
1578 /* do all our children */
1579 child = call_prom("child", 1, 1, node);
1580 while (child != 0) {
1581 scan_dt_build_strings(child, mem_start, mem_end);
1582 child = call_prom("peer", 1, 1, child);
1583 }
1584}
1585
1586static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1587 unsigned long *mem_end)
1588{
1589 phandle child;
1590 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
1591 unsigned long soff;
1592 unsigned char *valp;
1593 unsigned long offset = reloc_offset();
1594 static char pname[MAX_PROPERTY_NAME];
1595 int l;
1596
1597 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
1598
1599 /* get the node's full name */
1600 namep = (char *)*mem_start;
1601 l = call_prom("package-to-path", 3, 1, node,
1602 namep, *mem_end - *mem_start);
1603 if (l >= 0) {
1604 /* Didn't fit? Get more room. */
1605 if ((l+1) > (*mem_end - *mem_start)) {
1606 namep = make_room(mem_start, mem_end, l+1, 1);
1607 call_prom("package-to-path", 3, 1, node, namep, l);
1608 }
1609 namep[l] = '\0';
1610
1611 /* Fix up an Apple bug where they have bogus \0 chars in the
1612 * middle of the path in some properties
1613 */
1614 for (p = namep, ep = namep + l; p < ep; p++)
1615 if (*p == '\0') {
1616 memmove(p, p+1, ep - p);
1617 ep--; l--; p--;
1618 }
1619
1620 /* now try to extract the unit name in that mess */
1621 for (p = namep, lp = NULL; *p; p++)
1622 if (*p == '/')
1623 lp = p + 1;
1624 if (lp != NULL)
1625 memmove(namep, lp, strlen(lp) + 1);
1626 *mem_start = _ALIGN(((unsigned long) namep) +
1627 strlen(namep) + 1, 4);
1628 }
1629
1630 /* get it again for debugging */
1631 path = RELOC(prom_scratch);
1632 memset(path, 0, PROM_SCRATCH_SIZE);
1633 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1634
1635 /* get and store all properties */
1636 prev_name = RELOC("");
1637 sstart = (char *)RELOC(dt_string_start);
1638 for (;;) {
1639 if (call_prom("nextprop", 3, 1, node, prev_name,
1640 RELOC(pname)) != 1)
1641 break;
1642
1643 /* skip "name" */
1644 if (strcmp(RELOC(pname), RELOC("name")) == 0) {
1645 prev_name = RELOC("name");
1646 continue;
1647 }
1648
1649 /* find string offset */
1650 soff = dt_find_string(RELOC(pname));
1651 if (soff == 0) {
1652 prom_printf("WARNING: Can't find string index for"
1653 " <%s>, node %s\n", RELOC(pname), path);
1654 break;
1655 }
1656 prev_name = sstart + soff;
1657
1658 /* get length */
1659 l = call_prom("getproplen", 2, 1, node, RELOC(pname));
1660
1661 /* sanity checks */
1662 if (l == PROM_ERROR)
1663 continue;
1664 if (l > MAX_PROPERTY_LENGTH) {
1665 prom_printf("WARNING: ignoring large property ");
1666 /* It seems OF doesn't null-terminate the path :-( */
1667 prom_printf("[%s] ", path);
1668 prom_printf("%s length 0x%x\n", RELOC(pname), l);
1669 continue;
1670 }
1671
1672 /* push property head */
1673 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1674 dt_push_token(l, mem_start, mem_end);
1675 dt_push_token(soff, mem_start, mem_end);
1676
1677 /* push property content */
1678 valp = make_room(mem_start, mem_end, l, 4);
1679 call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
1680 *mem_start = _ALIGN(*mem_start, 4);
1681 }
1682
1683 /* Add a "linux,phandle" property. */
1684 soff = dt_find_string(RELOC("linux,phandle"));
1685 if (soff == 0)
1686 prom_printf("WARNING: Can't find string index for"
1687 " <linux-phandle> node %s\n", path);
1688 else {
1689 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1690 dt_push_token(4, mem_start, mem_end);
1691 dt_push_token(soff, mem_start, mem_end);
1692 valp = make_room(mem_start, mem_end, 4, 4);
1693 *(u32 *)valp = node;
1694 }
1695
1696 /* do all our children */
1697 child = call_prom("child", 1, 1, node);
1698 while (child != 0) {
1699 scan_dt_build_struct(child, mem_start, mem_end);
1700 child = call_prom("peer", 1, 1, child);
1701 }
1702
1703 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
1704}
1705
1706static void __init flatten_device_tree(void)
1707{
1708 phandle root;
1709 unsigned long offset = reloc_offset();
1710 unsigned long mem_start, mem_end, room;
1711 struct boot_param_header *hdr;
1712 struct prom_t *_prom = PTRRELOC(&prom);
1713 char *namep;
1714 u64 *rsvmap;
1715
1716 /*
1717 * Check how much room we have between alloc top & bottom (+/- a
1718 * few pages), crop to 4Mb, as this is our "chunk" size
1719 */
1720 room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
1721 if (room > DEVTREE_CHUNK_SIZE)
1722 room = DEVTREE_CHUNK_SIZE;
1723 prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
1724
1725 /* Now try to claim that */
1726 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
1727 if (mem_start == 0)
1728 prom_panic("Can't allocate initial device-tree chunk\n");
1729 mem_end = RELOC(alloc_top);
1730
1731 /* Get root of tree */
1732 root = call_prom("peer", 1, 1, (phandle)0);
1733 if (root == (phandle)0)
1734 prom_panic ("couldn't get device tree root\n");
1735
1736 /* Build header and make room for mem rsv map */
1737 mem_start = _ALIGN(mem_start, 4);
1738 hdr = make_room(&mem_start, &mem_end,
1739 sizeof(struct boot_param_header), 4);
1740 RELOC(dt_header_start) = (unsigned long)hdr;
1741 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
1742
1743 /* Start of strings */
1744 mem_start = PAGE_ALIGN(mem_start);
1745 RELOC(dt_string_start) = mem_start;
1746 mem_start += 4; /* hole */
1747
1748 /* Add "linux,phandle" in there, we'll need it */
1749 namep = make_room(&mem_start, &mem_end, 16, 1);
1750 strcpy(namep, RELOC("linux,phandle"));
1751 mem_start = (unsigned long)namep + strlen(namep) + 1;
1752
1753 /* Build string array */
1754 prom_printf("Building dt strings...\n");
1755 scan_dt_build_strings(root, &mem_start, &mem_end);
1756 RELOC(dt_string_end) = mem_start;
1757
1758 /* Build structure */
1759 mem_start = PAGE_ALIGN(mem_start);
1760 RELOC(dt_struct_start) = mem_start;
1761 prom_printf("Building dt structure...\n");
1762 scan_dt_build_struct(root, &mem_start, &mem_end);
1763 dt_push_token(OF_DT_END, &mem_start, &mem_end);
1764 RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
1765
1766 /* Finish header */
1767 hdr->boot_cpuid_phys = _prom->cpu;
1768 hdr->magic = OF_DT_HEADER;
1769 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
1770 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
1771 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
1772 hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
1773 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
1774 hdr->version = OF_DT_VERSION;
1775 /* Version 16 is not backward compatible */
1776 hdr->last_comp_version = 0x10;
1777
1778 /* Reserve the whole thing and copy the reserve map in; we
1779 * also bump mem_reserve_cnt to cause further reservations to
1780 * fail since it's too late.
1781 */
1782 reserve_mem(RELOC(dt_header_start), hdr->totalsize);
1783 memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
1784
1785#ifdef DEBUG_PROM
1786 {
1787 int i;
1788 prom_printf("reserved memory map:\n");
1789 for (i = 0; i < RELOC(mem_reserve_cnt); i++)
1790 prom_printf(" %x - %x\n", RELOC(mem_reserve_map)[i].base,
1791 RELOC(mem_reserve_map)[i].size);
1792 }
1793#endif
1794 RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
1795
1796 prom_printf("Device tree strings 0x%x -> 0x%x\n",
1797 RELOC(dt_string_start), RELOC(dt_string_end));
1798 prom_printf("Device tree struct 0x%x -> 0x%x\n",
1799 RELOC(dt_struct_start), RELOC(dt_struct_end));
1800
1801}
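/*
 * Sketch of the blob laid out above (offsets are from dt_header_start):
 *
 *	boot_param_header
 *	memory reserve map (u64 pairs)		<- off_mem_rsvmap
 *	string block (page aligned)		<- off_dt_strings
 *	structure block (page aligned)		<- off_dt_struct
 *	... up to dt_struct_end, the totalsize recorded in the header
 */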
1802
1803
1804static void __init fixup_device_tree(void)
1805{
1806 unsigned long offset = reloc_offset();
1807 phandle u3, i2c, mpic;
1808 u32 u3_rev;
1809 u32 interrupts[2];
1810 u32 parent;
1811
1812 /* Some G5s have a missing interrupt definition, fix it up here */
1813 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
1814 if (!PHANDLE_VALID(u3))
1815 return;
1816 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
1817 if (!PHANDLE_VALID(i2c))
1818 return;
1819 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
1820 if (!PHANDLE_VALID(mpic))
1821 return;
1822
1823 /* check if proper rev of u3 */
1824 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
1825 == PROM_ERROR)
1826 return;
1827 if (u3_rev < 0x35 || u3_rev > 0x39)
1828 return;
1829 /* does it need fixup ? */
1830 if (prom_getproplen(i2c, "interrupts") > 0)
1831 return;
1832
1833 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
1834
1835 /* interrupt on this revision of u3 is number 0 and level */
1836 interrupts[0] = 0;
1837 interrupts[1] = 1;
1838 prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
1839 parent = (u32)mpic;
1840 prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
1841}
1842
1843
1844static void __init prom_find_boot_cpu(void)
1845{
1846 unsigned long offset = reloc_offset();
1847 struct prom_t *_prom = PTRRELOC(&prom);
1848 u32 getprop_rval;
1849 ihandle prom_cpu;
1850 phandle cpu_pkg;
1851
1852 if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
1853 prom_panic("cannot find boot cpu");
1854
1855 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
1856
1857 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
1858 _prom->cpu = getprop_rval;
1859
1860 prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
1861}
1862
1863static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
1864{
1865#ifdef CONFIG_BLK_DEV_INITRD
1866 unsigned long offset = reloc_offset();
1867 struct prom_t *_prom = PTRRELOC(&prom);
1868
1869 if ( r3 && r4 && r4 != 0xdeadbeef) {
1870 u64 val;
1871
1872 RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
1873 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
1874
1875 val = (u64)RELOC(prom_initrd_start);
1876 prom_setprop(_prom->chosen, "linux,initrd-start", &val, sizeof(val));
1877 val = (u64)RELOC(prom_initrd_end);
1878 prom_setprop(_prom->chosen, "linux,initrd-end", &val, sizeof(val));
1879
1880 reserve_mem(RELOC(prom_initrd_start),
1881 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
1882
1883 prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
1884 prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
1885 }
1886#endif /* CONFIG_BLK_DEV_INITRD */
1887}
1888
1889/*
1890 * We enter here early on, when the Open Firmware prom is still
1891 * handling exceptions and managing the MMU hash table for us.
1892 */
1893
1894unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long pp,
1895 unsigned long r6, unsigned long r7)
1896{
1897 unsigned long offset = reloc_offset();
1898 struct prom_t *_prom = PTRRELOC(&prom);
1899 unsigned long phys = KERNELBASE - offset;
1900 u32 getprop_rval;
1901
1902 /*
1903 * First zero the BSS
1904 */
1905 memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
1906
1907 /*
1908 * Init interface to Open Firmware, get some node references,
1909 * like /chosen
1910 */
1911 prom_init_client_services(pp);
1912
1913 /*
1914 * Init prom stdout device
1915 */
1916 prom_init_stdout();
1917 prom_debug("klimit=0x%x\n", RELOC(klimit));
1918 prom_debug("offset=0x%x\n", offset);
1919
1920 /*
1921 * Check for an initrd
1922 */
1923 prom_check_initrd(r3, r4);
1924
1925 /*
1926 * Get default machine type. At this point, we do not differentiate
1927 * between pSeries SMP and pSeries LPAR
1928 */
1929 RELOC(of_platform) = prom_find_machine_type();
1930 getprop_rval = RELOC(of_platform);
1931 prom_setprop(_prom->chosen, "linux,platform",
1932 &getprop_rval, sizeof(getprop_rval));
1933
1934 /*
1935 * On pSeries, inform the firmware about our capabilities
1936 */
1937 if (RELOC(of_platform) == PLATFORM_PSERIES ||
1938 RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
1939 prom_send_capabilities();
1940
1941 /*
1942 * On pSeries and Cell, copy the CPU hold code
1943 */
1944 if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_CELL))
1945 copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
1946
1947 /*
1948 * Get memory cells format
1949 */
1950 getprop_rval = 1;
1951 prom_getprop(_prom->root, "#size-cells",
1952 &getprop_rval, sizeof(getprop_rval));
1953 _prom->root_size_cells = getprop_rval;
1954 getprop_rval = 2;
1955 prom_getprop(_prom->root, "#address-cells",
1956 &getprop_rval, sizeof(getprop_rval));
1957 _prom->root_addr_cells = getprop_rval;
1958
1959 /*
1960 * Do early parsing of command line
1961 */
1962 early_cmdline_parse();
1963
1964 /*
1965 * Initialize memory management within prom_init
1966 */
1967 prom_init_mem();
1968
1969 /*
1970 * Determine which cpu is actually running right _now_
1971 */
1972 prom_find_boot_cpu();
1973
1974 /*
1975 * Initialize display devices
1976 */
1977 prom_check_displays();
1978
1979 /*
1980 * Initialize the IOMMU (TCE tables) on pSeries. Do that before anything
1981 * else that uses the allocator; we need to make sure we get the top of
1982 * memory available for us here...
1983 */
1984 if (RELOC(of_platform) == PLATFORM_PSERIES)
1985 prom_initialize_tce_table();
1986
1987 /*
1988 * On non-powermacs, try to instantiate RTAS and put all CPUs
1989 * in spin-loops. PowerMacs don't have a working RTAS and use
1990 * a different way to spin CPUs
1991 */
1992 if (RELOC(of_platform) != PLATFORM_POWERMAC) {
1993 prom_instantiate_rtas();
1994 prom_hold_cpus();
1995 }
1996
1997 /*
1998 * Fill in some info for use by the kernel later on
1999 */
2000 if (RELOC(ppc64_iommu_off))
2001 prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
2002
2003 if (RELOC(iommu_force_on))
2004 prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
2005
2006 if (RELOC(prom_memory_limit))
2007 prom_setprop(_prom->chosen, "linux,memory-limit",
2008 PTRRELOC(&prom_memory_limit), sizeof(RELOC(prom_memory_limit)));
2009
2010 if (RELOC(prom_tce_alloc_start)) {
2011 prom_setprop(_prom->chosen, "linux,tce-alloc-start",
2012 PTRRELOC(&prom_tce_alloc_start), sizeof(RELOC(prom_tce_alloc_start)));
2013 prom_setprop(_prom->chosen, "linux,tce-alloc-end",
2014 PTRRELOC(&prom_tce_alloc_end), sizeof(RELOC(prom_tce_alloc_end)));
2015 }
2016
2017 /*
2018 * Fixup any known bugs in the device-tree
2019 */
2020 fixup_device_tree();
2021
2022 /*
2023 * Now finally create the flattened device-tree
2024 */
2025 prom_printf("copying OF device tree ...\n");
2026 flatten_device_tree();
2027
2028 /* in case stdin is USB and still active on IBM machines... */
2029 prom_close_stdin();
2030
2031 /*
2032 * Call the OF "quiesce" method to shut down pending DMAs from
2033 * devices etc...
2034 */
2035 prom_printf("Calling quiesce ...\n");
2036 call_prom("quiesce", 0, 0);
2037
2038 /*
2039 * And finally, call the kernel passing it the flattened device
2040 * tree and NULL as r5, thus triggering the new entry point which
2041 * is common to us and kexec
2042 */
2043 prom_printf("returning from prom_init\n");
2044 prom_debug("->dt_header_start=0x%x\n", RELOC(dt_header_start));
2045 prom_debug("->phys=0x%x\n", phys);
2046
2047 __start(RELOC(dt_header_start), phys, 0);
2048
2049 return 0;
2050}
2051
diff --git a/arch/ppc64/kernel/semaphore.c b/arch/ppc64/kernel/semaphore.c
deleted file mode 100644
index a1c1db573e9c..000000000000
--- a/arch/ppc64/kernel/semaphore.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 *
3 *
4 * PowerPC-specific semaphore code.
5 *
6 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
14 * to eliminate the SMP races in the old version between the updates
15 * of `count' and `waking'. Now we use negative `count' values to
16 * indicate that some process(es) are waiting for the semaphore.
17 */
18
19#include <linux/sched.h>
20#include <linux/init.h>
21#include <linux/module.h>
22
23#include <asm/atomic.h>
24#include <asm/semaphore.h>
25#include <asm/errno.h>
26
27/*
28 * Atomically update sem->count.
29 * This does the equivalent of the following:
30 *
31 * old_count = sem->count;
32 * tmp = MAX(old_count, 0) + incr;
33 * sem->count = tmp;
34 * return old_count;
35 */
36static inline int __sem_update_count(struct semaphore *sem, int incr)
37{
38 int old_count, tmp;
39
40 __asm__ __volatile__("\n"
41"1: lwarx %0,0,%3\n"
42" srawi %1,%0,31\n"
43" andc %1,%0,%1\n"
44" add %1,%1,%4\n"
45" stwcx. %1,0,%3\n"
46" bne 1b"
47 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
48 : "r" (&sem->count), "r" (incr), "m" (sem->count)
49 : "cc");
50
51 return old_count;
52}
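/*
 * Line by line, the sequence above matches the pseudo-code in the comment:
 * lwarx loads count with a reservation; srawi yields 0xffffffff when the
 * value is negative and 0 otherwise; andc with that mask gives
 * MAX(old_count, 0); add applies incr; and stwcx. stores conditionally,
 * branching back to 1b if another cpu touched count in the meantime.
 */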
53
54void __up(struct semaphore *sem)
55{
56 /*
57 * Note that we incremented count in up() before we came here,
58 * but that was ineffective since the result was <= 0, and
59 * any negative value of count is equivalent to 0.
60 * This ends up setting count to 1, unless count is now > 0
61 * (i.e. because some other cpu has called up() in the meantime),
62 * in which case we just increment count.
63 */
64 __sem_update_count(sem, 1);
65 wake_up(&sem->wait);
66}
67EXPORT_SYMBOL(__up);
68
69/*
70 * Note that when we come in to __down or __down_interruptible,
71 * we have already decremented count, but that decrement was
72 * ineffective since the result was < 0, and any negative value
73 * of count is equivalent to 0.
74 * Thus it is only when we decrement count from some value > 0
75 * that we have actually got the semaphore.
76 */
77void __sched __down(struct semaphore *sem)
78{
79 struct task_struct *tsk = current;
80 DECLARE_WAITQUEUE(wait, tsk);
81
82 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
83 add_wait_queue_exclusive(&sem->wait, &wait);
84
85 /*
86 * Try to get the semaphore. If the count is > 0, then we've
87 * got the semaphore; we decrement count and exit the loop.
88 * If the count is 0 or negative, we set it to -1, indicating
89 * that we are asleep, and then sleep.
90 */
91 while (__sem_update_count(sem, -1) <= 0) {
92 schedule();
93 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
94 }
95 remove_wait_queue(&sem->wait, &wait);
96 __set_task_state(tsk, TASK_RUNNING);
97
98 /*
99 * If there are any more sleepers, wake one of them up so
100 * that it can either get the semaphore, or set count to -1
101 * indicating that there are still processes sleeping.
102 */
103 wake_up(&sem->wait);
104}
105EXPORT_SYMBOL(__down);
106
107int __sched __down_interruptible(struct semaphore * sem)
108{
109 int retval = 0;
110 struct task_struct *tsk = current;
111 DECLARE_WAITQUEUE(wait, tsk);
112
113 __set_task_state(tsk, TASK_INTERRUPTIBLE);
114 add_wait_queue_exclusive(&sem->wait, &wait);
115
116 while (__sem_update_count(sem, -1) <= 0) {
117 if (signal_pending(current)) {
118 /*
119 * A signal is pending - give up trying.
120 * Set sem->count to 0 if it is negative,
121 * since we are no longer sleeping.
122 */
123 __sem_update_count(sem, 0);
124 retval = -EINTR;
125 break;
126 }
127 schedule();
128 set_task_state(tsk, TASK_INTERRUPTIBLE);
129 }
130 remove_wait_queue(&sem->wait, &wait);
131 __set_task_state(tsk, TASK_RUNNING);
132
133 wake_up(&sem->wait);
134 return retval;
135}
136EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/ppc64/kernel/vdso.c b/arch/ppc64/kernel/vdso.c
deleted file mode 100644
index 1bbacac44988..000000000000
--- a/arch/ppc64/kernel/vdso.c
+++ /dev/null
@@ -1,625 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/vdso.c
3 *
4 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/user.h>
25#include <linux/elf.h>
26#include <linux/security.h>
27#include <linux/bootmem.h>
28
29#include <asm/pgtable.h>
30#include <asm/system.h>
31#include <asm/processor.h>
32#include <asm/mmu.h>
33#include <asm/mmu_context.h>
34#include <asm/machdep.h>
35#include <asm/cputable.h>
36#include <asm/sections.h>
37#include <asm/systemcfg.h>
38#include <asm/vdso.h>
39
40#undef DEBUG
41
42#ifdef DEBUG
43#define DBG(fmt...) printk(fmt)
44#else
45#define DBG(fmt...)
46#endif
47
48
49/*
50 * The vDSOs themselves are here
51 */
52extern char vdso64_start, vdso64_end;
53extern char vdso32_start, vdso32_end;
54
55static void *vdso64_kbase = &vdso64_start;
56static void *vdso32_kbase = &vdso32_start;
57
58unsigned int vdso64_pages;
59unsigned int vdso32_pages;
60
61/* Signal trampolines user addresses */
62
63unsigned long vdso64_rt_sigtramp;
64unsigned long vdso32_sigtramp;
65unsigned long vdso32_rt_sigtramp;
66
67/* Format of the patch table */
68struct vdso_patch_def
69{
70 u32 pvr_mask, pvr_value;
71 const char *gen_name;
72 const char *fix_name;
73};
74
75/* Table of functions to patch based on the CPU type/revision
76 *
77 * TODO: Improve by adding whole lists for each entry
78 */
79static struct vdso_patch_def vdso_patches[] = {
80 {
81 0xffff0000, 0x003a0000, /* POWER5 */
82 "__kernel_sync_dicache", "__kernel_sync_dicache_p5"
83 },
84 {
85 0xffff0000, 0x003b0000, /* POWER5 */
86 "__kernel_sync_dicache", "__kernel_sync_dicache_p5"
87 },
88};
89
90/*
91 * Some information carried around for each of the vDSOs during parsing at
92 * boot time.
93 */
94struct lib32_elfinfo
95{
96 Elf32_Ehdr *hdr; /* ptr to ELF */
97 Elf32_Sym *dynsym; /* ptr to .dynsym section */
98 unsigned long dynsymsize; /* size of .dynsym section */
99 char *dynstr; /* ptr to .dynstr section */
100 unsigned long text; /* offset of .text section in .so */
101};
102
103struct lib64_elfinfo
104{
105 Elf64_Ehdr *hdr;
106 Elf64_Sym *dynsym;
107 unsigned long dynsymsize;
108 char *dynstr;
109 unsigned long text;
110};
111
112
113#ifdef __DEBUG
114static void dump_one_vdso_page(struct page *pg, struct page *upg)
115{
116 printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
117 page_count(pg),
118 pg->flags);
119 if (upg/* && pg != upg*/) {
120 printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg) << PAGE_SHIFT),
121 page_count(upg),
122 upg->flags);
123 }
124 printk("\n");
125}
126
127static void dump_vdso_pages(struct vm_area_struct * vma)
128{
129 int i;
130
131 if (!vma || test_thread_flag(TIF_32BIT)) {
132 printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
133 for (i=0; i<vdso32_pages; i++) {
134 struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
135 struct page *upg = (vma && vma->vm_mm) ?
136 follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
137 : NULL;
138 dump_one_vdso_page(pg, upg);
139 }
140 }
141 if (!vma || !test_thread_flag(TIF_32BIT)) {
142 printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
143 for (i=0; i<vdso64_pages; i++) {
144 struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
145 struct page *upg = (vma && vma->vm_mm) ?
146 follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
147 : NULL;
148 dump_one_vdso_page(pg, upg);
149 }
150 }
151}
152#endif /* __DEBUG */
153
154/*
155 * Keep a dummy vma_close for now; it will prevent VMA merging.
156 */
157static void vdso_vma_close(struct vm_area_struct * vma)
158{
159}
160
161/*
162 * Our nopage() function maps in the actual vDSO kernel pages; they will
163 * be mapped read-only by do_no_page(), and eventually COW'ed, either
164 * right away for an initial write access, or by do_wp_page().
165 */
166static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
167 unsigned long address, int *type)
168{
169 unsigned long offset = address - vma->vm_start;
170 struct page *pg;
171 void *vbase = test_thread_flag(TIF_32BIT) ? vdso32_kbase : vdso64_kbase;
172
173 DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n",
174 current->comm, address, offset);
175
176 if (address < vma->vm_start || address > vma->vm_end)
177 return NOPAGE_SIGBUS;
178
179 /*
180 * Last page is systemcfg.
181 */
182 if ((vma->vm_end - address) <= PAGE_SIZE)
183 pg = virt_to_page(_systemcfg);
184 else
185 pg = virt_to_page(vbase + offset);
186
187 get_page(pg);
188 DBG(" ->page count: %d\n", page_count(pg));
189
190 return pg;
191}
192
193static struct vm_operations_struct vdso_vmops = {
194 .close = vdso_vma_close,
195 .nopage = vdso_vma_nopage,
196};
197
198/*
199 * This is called from binfmt_elf; we create the special vma for the
200 * vDSO and insert it into the mm struct tree
201 */
202int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
203{
204 struct mm_struct *mm = current->mm;
205 struct vm_area_struct *vma;
206 unsigned long vdso_pages;
207 unsigned long vdso_base;
208
209 if (test_thread_flag(TIF_32BIT)) {
210 vdso_pages = vdso32_pages;
211 vdso_base = VDSO32_MBASE;
212 } else {
213 vdso_pages = vdso64_pages;
214 vdso_base = VDSO64_MBASE;
215 }
216
217 current->thread.vdso_base = 0;
218
219 /* The vDSO has a problem and was disabled; just don't "enable" it for the
220 * process
221 */
222 if (vdso_pages == 0)
223 return 0;
224
225 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
226 if (vma == NULL)
227 return -ENOMEM;
228
229 memset(vma, 0, sizeof(*vma));
230
231 /*
232 * Pick a base address for the vDSO in process space. We try to put it
233 * at vdso_base, which is the "natural" base for it, but we might fail
234 * and end up putting it elsewhere.
235 */
236 vdso_base = get_unmapped_area(NULL, vdso_base,
237 vdso_pages << PAGE_SHIFT, 0, 0);
238 if (vdso_base & ~PAGE_MASK) {
239 kmem_cache_free(vm_area_cachep, vma);
240 return (int)vdso_base;
241 }
242
243 current->thread.vdso_base = vdso_base;
244
245 vma->vm_mm = mm;
246 vma->vm_start = current->thread.vdso_base;
247
248 /*
249 * the VMA size is one page more than the vDSO since systemcfg
250 * is mapped in the last one
251 */
252 vma->vm_end = vma->vm_start + ((vdso_pages + 1) << PAGE_SHIFT);
253
254 /*
255 * Our vma flags don't have VM_WRITE, so by default the process isn't allowed
256 * to write those pages.
257 * gdb can break that with the ptrace interface, and thus trigger COW on those
258 * pages, but it's then your responsibility never to do that on the "data" page
259 * of the vDSO, or you'll stop getting kernel updates and your nice userland
260 * gettimeofday will be totally dead. It's fine to use it for setting
261 * breakpoints in the vDSO code pages, though.
262 */
263 vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_RESERVED;
264 vma->vm_flags |= mm->def_flags;
265 vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
266 vma->vm_ops = &vdso_vmops;
267
268 down_write(&mm->mmap_sem);
269 if (insert_vm_struct(mm, vma)) {
270 up_write(&mm->mmap_sem);
271 kmem_cache_free(vm_area_cachep, vma);
272 return -ENOMEM;
273 }
274 mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
275 up_write(&mm->mmap_sem);
276
277 return 0;
278}
279
280static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
281 unsigned long *size)
282{
283 Elf32_Shdr *sechdrs;
284 unsigned int i;
285 char *secnames;
286
287 /* Grab section headers and strings so we can tell who is who */
288 sechdrs = (void *)ehdr + ehdr->e_shoff;
289 secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
290
291 /* Find the section they want */
292 for (i = 1; i < ehdr->e_shnum; i++) {
293 if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
294 if (size)
295 *size = sechdrs[i].sh_size;
296 return (void *)ehdr + sechdrs[i].sh_offset;
297 }
298 }
299	if (size) *size = 0;
300 return NULL;
301}
302
303static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
304 unsigned long *size)
305{
306 Elf64_Shdr *sechdrs;
307 unsigned int i;
308 char *secnames;
309
310 /* Grab section headers and strings so we can tell who is who */
311 sechdrs = (void *)ehdr + ehdr->e_shoff;
312 secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
313
314 /* Find the section they want */
315 for (i = 1; i < ehdr->e_shnum; i++) {
316 if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
317 if (size)
318 *size = sechdrs[i].sh_size;
319 return (void *)ehdr + sechdrs[i].sh_offset;
320 }
321 }
322 if (size)
323 *size = 0;
324 return NULL;
325}
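/*
 * Note (editorial): both lookup loops above start at index 1 because
 * section header 0 is the reserved SHN_UNDEF entry, which never names
 * a real section.
 */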
326
327static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib, const char *symname)
328{
329 unsigned int i;
330 char name[32], *c;
331
332 for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
333 if (lib->dynsym[i].st_name == 0)
334 continue;
335		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, sizeof(name));
336 c = strchr(name, '@');
337 if (c)
338 *c = 0;
339 if (strcmp(symname, name) == 0)
340 return &lib->dynsym[i];
341 }
342 return NULL;
343}
344
345static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib, const char *symname)
346{
347 unsigned int i;
348 char name[32], *c;
349
350 for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
351 if (lib->dynsym[i].st_name == 0)
352 continue;
353		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, sizeof(name));
354 c = strchr(name, '@');
355 if (c)
356 *c = 0;
357 if (strcmp(symname, name) == 0)
358 return &lib->dynsym[i];
359 }
360 return NULL;
361}
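/*
 * Note (editorial): the '@' stripping above removes the symbol-version
 * suffix that the vDSO's version script appends (versioned names of the
 * form "name@@VERSION"), so lookups can use the bare symbol name.
 */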
362
363/* Note that we assume the section is .text and the symbol is relative to
364 * the library base.
365 */
366static unsigned long __init find_function32(struct lib32_elfinfo *lib, const char *symname)
367{
368 Elf32_Sym *sym = find_symbol32(lib, symname);
369
370 if (sym == NULL) {
371		printk(KERN_WARNING "vDSO32: function %s not found!\n", symname);
372 return 0;
373 }
374 return sym->st_value - VDSO32_LBASE;
375}
376
377/* Note that we assume the section is .text and the symbol is relative to
378 * the library base.
379 */
380static unsigned long __init find_function64(struct lib64_elfinfo *lib, const char *symname)
381{
382 Elf64_Sym *sym = find_symbol64(lib, symname);
383
384 if (sym == NULL) {
385		printk(KERN_WARNING "vDSO64: function %s not found!\n", symname);
386 return 0;
387 }
388#ifdef VDS64_HAS_DESCRIPTORS
389 return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) - VDSO64_LBASE;
390#else
391 return sym->st_value - VDSO64_LBASE;
392#endif
393}
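/*
 * Illustration (not part of the original file): what the
 * VDS64_HAS_DESCRIPTORS branch above dereferences. In the 64-bit ELFv1
 * ABI a function symbol points at an "official procedure descriptor"
 * rather than at the code itself; the first doubleword of the
 * descriptor is the real entry point:
 *
 *	struct opd_entry {
 *		u64 entry;	// address of the first instruction
 *		u64 toc;	// TOC base to load into r2
 *		u64 env;	// environment pointer (unused by C)
 *	};
 *
 * so reading "*(u64 *)descriptor_address", as above, yields the entry
 * point.
 */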
394
395
396static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
397 struct lib64_elfinfo *v64)
398{
399 void *sect;
400
401 /*
402 * Locate symbol tables & text section
403 */
404
405 v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
406 v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
407 if (v32->dynsym == NULL || v32->dynstr == NULL) {
408 printk(KERN_ERR "vDSO32: a required symbol section was not found\n");
409 return -1;
410 }
411 sect = find_section32(v32->hdr, ".text", NULL);
412 if (sect == NULL) {
413 printk(KERN_ERR "vDSO32: the .text section was not found\n");
414 return -1;
415 }
416 v32->text = sect - vdso32_kbase;
417
418 v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
419 v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
420 if (v64->dynsym == NULL || v64->dynstr == NULL) {
421 printk(KERN_ERR "vDSO64: a required symbol section was not found\n");
422 return -1;
423 }
424 sect = find_section64(v64->hdr, ".text", NULL);
425 if (sect == NULL) {
426 printk(KERN_ERR "vDSO64: the .text section was not found\n");
427 return -1;
428 }
429 v64->text = sect - vdso64_kbase;
430
431 return 0;
432}
433
434static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
435 struct lib64_elfinfo *v64)
436{
437 /*
438 * Find signal trampolines
439 */
440
441 vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
442 vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32");
443 vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
444}
445
446static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
447 struct lib64_elfinfo *v64)
448{
449 Elf32_Sym *sym32;
450 Elf64_Sym *sym64;
451
452 sym32 = find_symbol32(v32, "__kernel_datapage_offset");
453 if (sym32 == NULL) {
454		printk(KERN_ERR "vDSO32: Can't find symbol __kernel_datapage_offset!\n");
455 return -1;
456 }
457 *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
458 (vdso32_pages << PAGE_SHIFT) - (sym32->st_value - VDSO32_LBASE);
459
460 sym64 = find_symbol64(v64, "__kernel_datapage_offset");
461 if (sym64 == NULL) {
462		printk(KERN_ERR "vDSO64: Can't find symbol __kernel_datapage_offset!\n");
463 return -1;
464 }
465 *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
466 (vdso64_pages << PAGE_SHIFT) - (sym64->st_value - VDSO64_LBASE);
467
468 return 0;
469}
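/*
 * Worked example (editorial, not from the original file) of the fixup
 * above. The data page sits directly after the image, at offset
 * vdso_pages << PAGE_SHIFT, and the value stored at the symbol's own
 * location is the distance from that location to the data page.
 * Assuming 4K pages (PAGE_SHIFT == 12), a one-page vDSO and the symbol
 * at image offset 0x80:
 *
 *	stored value = (1 << 12) - 0x80 = 0xf80
 *
 * Position-independent vDSO code can then locate the data page as
 * "address of __kernel_datapage_offset + stored value".
 */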
470
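/*
 * Note (editorial): the two helpers below redirect a generic vDSO
 * symbol to a CPU-specific implementation by copying the "fix"
 * symbol's attributes over the generic one in the image's dynamic
 * symbol table, so userspace resolution picks up the replacement.
 */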
471static int vdso_do_func_patch32(struct lib32_elfinfo *v32,
472 struct lib64_elfinfo *v64,
473 const char *orig, const char *fix)
474{
475 Elf32_Sym *sym32_gen, *sym32_fix;
476
477 sym32_gen = find_symbol32(v32, orig);
478 if (sym32_gen == NULL) {
479		printk(KERN_ERR "vDSO32: Can't find symbol %s!\n", orig);
480 return -1;
481 }
482 sym32_fix = find_symbol32(v32, fix);
483 if (sym32_fix == NULL) {
484		printk(KERN_ERR "vDSO32: Can't find symbol %s!\n", fix);
485 return -1;
486 }
487 sym32_gen->st_value = sym32_fix->st_value;
488 sym32_gen->st_size = sym32_fix->st_size;
489 sym32_gen->st_info = sym32_fix->st_info;
490 sym32_gen->st_other = sym32_fix->st_other;
491 sym32_gen->st_shndx = sym32_fix->st_shndx;
492
493 return 0;
494}
495
496static int vdso_do_func_patch64(struct lib32_elfinfo *v32,
497 struct lib64_elfinfo *v64,
498 const char *orig, const char *fix)
499{
500 Elf64_Sym *sym64_gen, *sym64_fix;
501
502 sym64_gen = find_symbol64(v64, orig);
503 if (sym64_gen == NULL) {
504		printk(KERN_ERR "vDSO64: Can't find symbol %s!\n", orig);
505 return -1;
506 }
507 sym64_fix = find_symbol64(v64, fix);
508 if (sym64_fix == NULL) {
509		printk(KERN_ERR "vDSO64: Can't find symbol %s!\n", fix);
510 return -1;
511 }
512 sym64_gen->st_value = sym64_fix->st_value;
513 sym64_gen->st_size = sym64_fix->st_size;
514 sym64_gen->st_info = sym64_fix->st_info;
515 sym64_gen->st_other = sym64_fix->st_other;
516 sym64_gen->st_shndx = sym64_fix->st_shndx;
517
518 return 0;
519}
520
521static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
522 struct lib64_elfinfo *v64)
523{
524 u32 pvr;
525 int i;
526
527 pvr = mfspr(SPRN_PVR);
528 for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
529 struct vdso_patch_def *patch = &vdso_patches[i];
530 int match = (pvr & patch->pvr_mask) == patch->pvr_value;
531
532		DBG("patch %d (mask: %x, pvr: %x): %s\n",
533 i, patch->pvr_mask, patch->pvr_value, match ? "match" : "skip");
534
535 if (!match)
536 continue;
537
538 DBG("replacing %s with %s...\n", patch->gen_name, patch->fix_name);
539
540		/*
541		 * Patch both the 32-bit and the 64-bit symbols. Note that we do
542		 * not patch the "." symbol on 64-bit. It would be easy to do, but
543		 * it doesn't seem to be necessary; patching the OPD symbol is enough.
544		 */
545 vdso_do_func_patch32(v32, v64, patch->gen_name, patch->fix_name);
546 vdso_do_func_patch64(v32, v64, patch->gen_name, patch->fix_name);
547 }
548
549 return 0;
550}
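/*
 * Illustration (editorial): a hypothetical vdso_patches[] entry showing
 * the fields the loop above consults; none of the values below are from
 * the original table. A CPU whose masked PVR matches gets the generic
 * symbol redirected to a CPU-specific variant:
 *
 *	{
 *		.pvr_mask  = 0xffff0000,
 *		.pvr_value = 0x00350000,	// hypothetical PVR value
 *		.gen_name  = "__kernel_sync_dicache",
 *		.fix_name  = "__kernel_sync_dicache_p5",
 *	},
 */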
551
552
553static __init int vdso_setup(void)
554{
555 struct lib32_elfinfo v32;
556 struct lib64_elfinfo v64;
557
558 v32.hdr = vdso32_kbase;
559 v64.hdr = vdso64_kbase;
560
561 if (vdso_do_find_sections(&v32, &v64))
562 return -1;
563
564 if (vdso_fixup_datapage(&v32, &v64))
565 return -1;
566
567 if (vdso_fixup_alt_funcs(&v32, &v64))
568 return -1;
569
570 vdso_setup_trampolines(&v32, &v64);
571
572 return 0;
573}
574
575void __init vdso_init(void)
576{
577 int i;
578
579 vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
580 vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
581
582 DBG("vdso64_kbase: %p, 0x%x pages, vdso32_kbase: %p, 0x%x pages\n",
583 vdso64_kbase, vdso64_pages, vdso32_kbase, vdso32_pages);
584
585	/*
586	 * Initialize the vDSO images in memory, that is, do the necessary
587	 * fixups of vDSO symbols, locate trampolines, etc.
588	 */
589 if (vdso_setup()) {
590		printk(KERN_ERR "vDSO setup failure, not enabled!\n");
591		/* XXX should we free the pages here? */
592 vdso64_pages = vdso32_pages = 0;
593 return;
594 }
595
596 /* Make sure pages are in the correct state */
597 for (i = 0; i < vdso64_pages; i++) {
598 struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
599 ClearPageReserved(pg);
600 get_page(pg);
601 }
602 for (i = 0; i < vdso32_pages; i++) {
603 struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
604 ClearPageReserved(pg);
605 get_page(pg);
606 }
607
608 get_page(virt_to_page(_systemcfg));
609}
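/*
 * Note (editorial): the loops above clear PG_reserved (which boot-time
 * memory setup left set on kernel-image pages) and take one long-lived
 * reference on every vDSO page, so the reference counting done by the
 * nopage handler and by process teardown can never actually free them.
 */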
610
611int in_gate_area_no_task(unsigned long addr)
612{
613 return 0;
614}
615
616int in_gate_area(struct task_struct *task, unsigned long addr)
617{
618 return 0;
619}
620
621struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
622{
623 return NULL;
624}
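/*
 * Note (editorial): these stubs report "no gate area" because the
 * powerpc vDSO is an ordinary per-process VMA (inserted above), unlike
 * the fixed gate page some other architectures expose outside any VMA.
 */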
625
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
deleted file mode 100644
index 022f220e772f..000000000000
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,151 +0,0 @@
1#include <asm/page.h>
2#include <asm-generic/vmlinux.lds.h>
3
4OUTPUT_ARCH(powerpc:common64)
5jiffies = jiffies_64;
6SECTIONS
7{
8 /* Sections to be discarded. */
9 /DISCARD/ : {
10 *(.exitcall.exit)
11 }
12
13
14 /* Read-only sections, merged into text segment: */
15 .text : {
16 *(.text .text.*)
17 SCHED_TEXT
18 LOCK_TEXT
19 KPROBES_TEXT
20 *(.fixup)
21 . = ALIGN(PAGE_SIZE);
22 _etext = .;
23 }
24
25 __ex_table : {
26 __start___ex_table = .;
27 *(__ex_table)
28 __stop___ex_table = .;
29 }
30
31 __bug_table : {
32 __start___bug_table = .;
33 *(__bug_table)
34 __stop___bug_table = .;
35 }
36
37 __ftr_fixup : {
38 __start___ftr_fixup = .;
39 *(__ftr_fixup)
40 __stop___ftr_fixup = .;
41 }
42
43 RODATA
44
45
46 /* will be freed after init */
47 . = ALIGN(PAGE_SIZE);
48 __init_begin = .;
49
50 .init.text : {
51 _sinittext = .;
52 *(.init.text)
53 _einittext = .;
54 }
55
56 .init.data : {
57 *(.init.data)
58 }
59
60 . = ALIGN(16);
61 .init.setup : {
62 __setup_start = .;
63 *(.init.setup)
64 __setup_end = .;
65 }
66
67 .initcall.init : {
68 __initcall_start = .;
69 *(.initcall1.init)
70 *(.initcall2.init)
71 *(.initcall3.init)
72 *(.initcall4.init)
73 *(.initcall5.init)
74 *(.initcall6.init)
75 *(.initcall7.init)
76 __initcall_end = .;
77 }
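/*
 * Sketch (editorial, not from the original file) of how the bracketed
 * initcall sections above are populated. include/linux/init.h of this
 * era defined, approximately:
 *
 *	#define __define_initcall(level, fn) \
 *		static initcall_t __initcall_##fn __attribute_used__ \
 *		__attribute__((__section__(".initcall" level ".init"))) = fn
 *
 * so each core_initcall()/device_initcall()/... drops one function
 * pointer into its level's section, and the kernel walks
 * __initcall_start..__initcall_end in order at boot.
 */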
78
79 .con_initcall.init : {
80 __con_initcall_start = .;
81 *(.con_initcall.init)
82 __con_initcall_end = .;
83 }
84
85 SECURITY_INIT
86
87 . = ALIGN(PAGE_SIZE);
88 .init.ramfs : {
89 __initramfs_start = .;
90 *(.init.ramfs)
91 __initramfs_end = .;
92 }
93
94 .data.percpu : {
95 __per_cpu_start = .;
96 *(.data.percpu)
97 __per_cpu_end = .;
98 }
99
100 . = ALIGN(PAGE_SIZE);
101 . = ALIGN(16384);
102 __init_end = .;
103 /* freed after init ends here */
104
105
106 /* Read/write sections */
107 . = ALIGN(PAGE_SIZE);
108 . = ALIGN(16384);
109 _sdata = .;
110 /* The initial task and kernel stack */
111 .data.init_task : {
112 *(.data.init_task)
113 }
114
115 . = ALIGN(PAGE_SIZE);
116 .data.page_aligned : {
117 *(.data.page_aligned)
118 }
119
120 .data.cacheline_aligned : {
121 *(.data.cacheline_aligned)
122 }
123
124 .data : {
125 *(.data .data.rel* .toc1)
126 *(.branch_lt)
127 }
128
129 .opd : {
130 *(.opd)
131 }
132
133 .got : {
134 __toc_start = .;
135 *(.got)
136 *(.toc)
137 . = ALIGN(PAGE_SIZE);
138 _edata = .;
139 }
140
141
142 . = ALIGN(PAGE_SIZE);
143 .bss : {
144 __bss_start = .;
145 *(.bss)
146 __bss_stop = .;
147 }
148
149 . = ALIGN(PAGE_SIZE);
150 _end = . ;
151}
diff --git a/arch/ppc64/xmon/privinst.h b/arch/ppc64/xmon/privinst.h
deleted file mode 100644
index 02eb40dac0b3..000000000000
--- a/arch/ppc64/xmon/privinst.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#define GETREG(reg) \
11 static inline unsigned long get_ ## reg (void) \
12 { unsigned long ret; asm volatile ("mf" #reg " %0" : "=r" (ret) :); return ret; }
13
14#define SETREG(reg) \
15 static inline void set_ ## reg (unsigned long val) \
16 { asm volatile ("mt" #reg " %0" : : "r" (val)); }
17
18GETREG(msr)
19SETREG(msrd)
20GETREG(cr)
21
22#define GSETSPR(n, name) \
23 static inline long get_ ## name (void) \
24 { long ret; asm volatile ("mfspr %0," #n : "=r" (ret) : ); return ret; } \
25 static inline void set_ ## name (long val) \
26 { asm volatile ("mtspr " #n ",%0" : : "r" (val)); }
27
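/*
 * Illustration (editorial): GSETSPR(9, ctr) below expands to roughly:
 *
 *	static inline long get_ctr(void)
 *	{ long ret; asm volatile ("mfspr %0,9" : "=r" (ret) : ); return ret; }
 *	static inline void set_ctr(long val)
 *	{ asm volatile ("mtspr 9,%0" : : "r" (val)); }
 */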
28GSETSPR(0, mq)
29GSETSPR(1, xer)
30GSETSPR(4, rtcu)
31GSETSPR(5, rtcl)
32GSETSPR(8, lr)
33GSETSPR(9, ctr)
34GSETSPR(18, dsisr)
35GSETSPR(19, dar)
36GSETSPR(22, dec)
37GSETSPR(25, sdr1)
38GSETSPR(26, srr0)
39GSETSPR(27, srr1)
40GSETSPR(272, sprg0)
41GSETSPR(273, sprg1)
42GSETSPR(274, sprg2)
43GSETSPR(275, sprg3)
44GSETSPR(282, ear)
45GSETSPR(287, pvr)
46GSETSPR(1008, hid0)
47GSETSPR(1009, hid1)
48GSETSPR(1010, iabr)
49GSETSPR(1023, pir)
50
51static inline void store_inst(void *p)
52{
53 asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
54}
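/*
 * Note (editorial): the sequence above is the standard PowerPC recipe
 * for publishing a newly written instruction: dcbst flushes the
 * data-cache line to memory, sync orders the flush, icbi invalidates
 * the stale instruction-cache line, and isync discards any
 * already-fetched instructions.
 */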
55
56static inline void cflush(void *p)
57{
58 asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
59}
60
61static inline void cinval(void *p)
62{
63 asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
64}